
lightningnetwork / lnd / 17189880600

24 Aug 2025 02:23PM UTC coverage: 66.762% (+9.4%) from 57.321%
Merge a4a00e732 into 0c2f045f5
Pull Request #9432: multi: add upfront-shutdown-address to lnd.conf.

31 of 39 new or added lines in 4 files covered. (79.49%)

22 existing lines in 8 files now uncovered.

136012 of 203728 relevant lines covered (66.76%)

21523.06 hits per line

Source File

/contractcourt/channel_arbitrator.go: 83.46% of lines covered
1
package contractcourt
2

3
import (
4
        "bytes"
5
        "context"
6
        "errors"
7
        "fmt"
8
        "math"
9
        "sync"
10
        "sync/atomic"
11
        "time"
12

13
        "github.com/btcsuite/btcd/btcutil"
14
        "github.com/btcsuite/btcd/chaincfg/chainhash"
15
        "github.com/btcsuite/btcd/txscript"
16
        "github.com/btcsuite/btcd/wire"
17
        "github.com/lightningnetwork/lnd/chainio"
18
        "github.com/lightningnetwork/lnd/channeldb"
19
        "github.com/lightningnetwork/lnd/fn/v2"
20
        "github.com/lightningnetwork/lnd/graph/db/models"
21
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
22
        "github.com/lightningnetwork/lnd/input"
23
        "github.com/lightningnetwork/lnd/invoices"
24
        "github.com/lightningnetwork/lnd/kvdb"
25
        "github.com/lightningnetwork/lnd/labels"
26
        "github.com/lightningnetwork/lnd/lntypes"
27
        "github.com/lightningnetwork/lnd/lnutils"
28
        "github.com/lightningnetwork/lnd/lnwallet"
29
        "github.com/lightningnetwork/lnd/lnwire"
30
        "github.com/lightningnetwork/lnd/sweep"
31
)
32

33
var (
34
        // errAlreadyForceClosed is an error returned when we attempt to force
35
        // close a channel that's already in the process of doing so.
36
        errAlreadyForceClosed = errors.New("channel is already in the " +
37
                "process of being force closed")
38
)
39

40
const (
41
        // arbitratorBlockBufferSize is the size of the buffer we give to each
42
        // channel arbitrator.
43
        arbitratorBlockBufferSize = 20
44

45
        // AnchorOutputValue is the output value for the anchor output of an
46
        // anchor channel.
47
        // See BOLT 03 for more details:
48
        // https://github.com/lightning/bolts/blob/master/03-transactions.md
49
        AnchorOutputValue = btcutil.Amount(330)
50
)
51

52
// WitnessSubscription represents an intent to be notified once new witnesses
53
// are discovered by various active contract resolvers. A contract resolver may
54
// use this to be notified when it can satisfy an incoming contract after we
55
// discover the witness for an outgoing contract.
56
type WitnessSubscription struct {
57
        // WitnessUpdates is a channel that newly discovered witnesses will be
58
        // sent over.
59
        //
60
        // TODO(roasbeef): couple with WitnessType?
61
        WitnessUpdates <-chan lntypes.Preimage
62

63
        // CancelSubscription is a function closure that should be used by a
64
        // client to cancel the subscription once they are no longer interested
65
        // in receiving new updates.
66
        CancelSubscription func()
67
}
68

69
// WitnessBeacon is a global beacon of witnesses. Contract resolvers will use
70
// this interface to lookup witnesses (preimages typically) of contracts
71
// they're trying to resolve, add new preimages they resolve, and finally
72
// receive new updates each new time a preimage is discovered.
73
//
74
// TODO(roasbeef): need to delete the pre-images once we've used them
75
// and have been sufficiently confirmed?
76
type WitnessBeacon interface {
77
        // SubscribeUpdates returns a channel that will be sent upon *each* time
78
        // a new preimage is discovered.
79
        SubscribeUpdates(chanID lnwire.ShortChannelID, htlc *channeldb.HTLC,
80
                payload *hop.Payload,
81
                nextHopOnionBlob []byte) (*WitnessSubscription, error)
82

83
        // LookupPreimage attempts to lookup a preimage in the global cache.
84
        // True is returned for the second argument if the preimage is found.
85
        LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool)
86

87
        // AddPreimages adds a batch of newly discovered preimages to the global
88
        // cache, and also signals any subscribers of the newly discovered
89
        // witness.
90
        AddPreimages(preimages ...lntypes.Preimage) error
91
}
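As an illustrative aside (not part of channel_arbitrator.go), the sketch below shows the access pattern the WitnessBeacon interface is designed for: consult the preimage cache first, then fall back to a subscription. The helper name lookupOrWait and the payHash parameter are assumptions made for the example.

// lookupOrWait is a hypothetical helper (not part of this file) showing the
// intended WitnessBeacon usage: consult the global preimage cache first, and
// only subscribe for updates when the preimage isn't known yet.
func lookupOrWait(beacon WitnessBeacon, chanID lnwire.ShortChannelID,
        htlc *channeldb.HTLC, payload *hop.Payload, onionBlob []byte,
        payHash lntypes.Hash) (lntypes.Preimage, error) {

        // Fast path: the preimage may already be in the global cache.
        if preimage, ok := beacon.LookupPreimage(payHash); ok {
                return preimage, nil
        }

        // Otherwise subscribe and wait for a matching preimage, making sure
        // the subscription is always torn down.
        sub, err := beacon.SubscribeUpdates(chanID, htlc, payload, onionBlob)
        if err != nil {
                return lntypes.Preimage{}, err
        }
        defer sub.CancelSubscription()

        for preimage := range sub.WitnessUpdates {
                if preimage.Hash() == payHash {
                        return preimage, nil
                }
        }

        return lntypes.Preimage{}, errors.New("witness update stream closed")
}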
92

93
// ArbChannel is an abstraction that allows the channel arbitrator to interact
94
// with an open channel.
95
type ArbChannel interface {
96
        // ForceCloseChan should force close the contract that this attendant
97
        // is watching over. We'll use this when we decide that we need to go
98
        // to chain. It should in addition tell the switch to remove the
99
        // corresponding link, such that we won't accept any new updates. The
100
        // returned transaction is the commitment that will be broadcast to
101
        // eventually resolve all outputs on chain.
102
        ForceCloseChan() (*wire.MsgTx, error)
103

104
        // NewAnchorResolutions returns the anchor resolutions for currently
105
        // valid commitment transactions.
106
        NewAnchorResolutions() (*lnwallet.AnchorResolutions, error)
107
}
108

109
// ChannelArbitratorConfig contains all the functionality that the
110
// ChannelArbitrator needs in order to properly arbitrate any contract dispute
111
// on chain.
112
type ChannelArbitratorConfig struct {
113
        // ChanPoint is the channel point that uniquely identifies this
114
        // channel.
115
        ChanPoint wire.OutPoint
116

117
        // Channel is the full channel data structure. For legacy channels, this
118
        // field may not always be set after a restart.
119
        Channel ArbChannel
120

121
        // ShortChanID describes the exact location of the channel within the
122
        // chain. We'll use this to address any messages that we need to send
123
        // to the switch during contract resolution.
124
        ShortChanID lnwire.ShortChannelID
125

126
        // ChainEvents is an active subscription to the chain watcher for this
127
        // channel to be notified of any on-chain activity related to this
128
        // channel.
129
        ChainEvents *ChainEventSubscription
130

131
        // MarkCommitmentBroadcasted should mark the channel's commitment as
132
        // broadcast, meaning we are now waiting for the commitment to confirm.
133
        MarkCommitmentBroadcasted func(*wire.MsgTx, lntypes.ChannelParty) error
134

135
        // MarkChannelClosed marks the channel closed in the database, with the
136
        // passed close summary. After this method successfully returns we can
137
        // no longer expect to receive chain events for this channel, and must
138
        // be able to recover from a failure without getting the close event
139
        // again. It takes an optional channel status which will update the
140
        // channel status in the record that we keep of historical channels.
141
        MarkChannelClosed func(*channeldb.ChannelCloseSummary,
142
                ...channeldb.ChannelStatus) error
143

144
        // IsPendingClose is a boolean indicating whether the channel is marked
145
        // as pending close in the database.
146
        IsPendingClose bool
147

148
        // ClosingHeight is the height at which the channel was closed. Note
149
        // that this value is only valid if IsPendingClose is true.
150
        ClosingHeight uint32
151

152
        // CloseType is the type of the close event in case IsPendingClose is
153
        // true. Otherwise this value is unset.
154
        CloseType channeldb.ClosureType
155

156
        // NotifyChannelResolved is used by the channel arbitrator to signal
157
        // that a given channel has been resolved.
158
        NotifyChannelResolved func()
159

160
        // PutResolverReport records a resolver report for the channel. If the
161
        // transaction provided is nil, the function should write the report
162
        // in a new transaction.
163
        PutResolverReport func(tx kvdb.RwTx,
164
                report *channeldb.ResolverReport) error
165

166
        // FetchHistoricalChannel retrieves the historical state of a channel.
167
        // This is mostly used to supplement the ContractResolvers with
168
        // additional information required for proper contract resolution.
169
        FetchHistoricalChannel func() (*channeldb.OpenChannel, error)
170

171
        // FindOutgoingHTLCDeadline returns the deadline in absolute block
172
        // height for the specified outgoing HTLC. For an outgoing HTLC, its
173
        // deadline is defined by the timeout height of its corresponding
174
        // incoming HTLC - this is the expiry height at which the remote peer can
175
        // spend his/her outgoing HTLC via the timeout path.
176
        FindOutgoingHTLCDeadline func(htlc channeldb.HTLC) fn.Option[int32]
177

178
        ChainArbitratorConfig
179
}
180

181
// ReportOutputType describes the type of output that is being reported
182
// on.
183
type ReportOutputType uint8
184

185
const (
186
        // ReportOutputIncomingHtlc is an incoming hash time locked contract on
187
        // the commitment tx.
188
        ReportOutputIncomingHtlc ReportOutputType = iota
189

190
        // ReportOutputOutgoingHtlc is an outgoing hash time locked contract on
191
        // the commitment tx.
192
        ReportOutputOutgoingHtlc
193

194
        // ReportOutputUnencumbered is an uncontested output on the commitment
195
        // transaction paying to us directly.
196
        ReportOutputUnencumbered
197

198
        // ReportOutputAnchor is an anchor output on the commitment tx.
199
        ReportOutputAnchor
200
)
201

202
// ContractReport provides a summary of a commitment tx output.
203
type ContractReport struct {
204
        // Outpoint is the final output that will be swept back to the wallet.
205
        Outpoint wire.OutPoint
206

207
        // Type indicates the type of the reported output.
208
        Type ReportOutputType
209

210
        // Amount is the final value that will be swept back to the wallet.
211
        Amount btcutil.Amount
212

213
        // MaturityHeight is the absolute block height that this output will
214
        // mature at.
215
        MaturityHeight uint32
216

217
        // Stage indicates whether the htlc is in the CLTV-timeout stage (1) or
218
        // the CSV-delay stage (2). A stage 1 htlc's maturity height will be set
219
        // to its expiry height, while a stage 2 htlc's maturity height will be
220
        // set to its confirmation height plus the maturity requirement.
221
        Stage uint32
222

223
        // LimboBalance is the total number of frozen coins within this
224
        // contract.
225
        LimboBalance btcutil.Amount
226

227
        // RecoveredBalance is the total value that has been successfully swept
228
        // back to the user's wallet.
229
        RecoveredBalance btcutil.Amount
230
}
231

232
// resolverReport creates a resolver report using some of the information in the
233
// contract report.
234
func (c *ContractReport) resolverReport(spendTx *chainhash.Hash,
235
        resolverType channeldb.ResolverType,
236
        outcome channeldb.ResolverOutcome) *channeldb.ResolverReport {
13✔
237

13✔
238
        return &channeldb.ResolverReport{
13✔
239
                OutPoint:        c.Outpoint,
13✔
240
                Amount:          c.Amount,
13✔
241
                ResolverType:    resolverType,
13✔
242
                ResolverOutcome: outcome,
13✔
243
                SpendTxID:       spendTx,
13✔
244
        }
13✔
245
}
13✔
246

247
// htlcSet represents the set of active HTLCs on a given commitment
248
// transaction.
249
type htlcSet struct {
250
        // incomingHTLCs is a map of all incoming HTLCs on the target
251
        // commitment transaction. We may potentially go onchain to claim the
252
        // funds sent to us within this set.
253
        incomingHTLCs map[uint64]channeldb.HTLC
254

255
        // outgoingHTLCs is a map of all outgoing HTLCs on the target
256
        // commitment transaction. We may potentially go onchain to reclaim the
257
        // funds that are currently in limbo.
258
        outgoingHTLCs map[uint64]channeldb.HTLC
259
}
260

261
// newHtlcSet constructs a new HTLC set from a slice of HTLC's.
262
func newHtlcSet(htlcs []channeldb.HTLC) htlcSet {
49✔
263
        outHTLCs := make(map[uint64]channeldb.HTLC)
49✔
264
        inHTLCs := make(map[uint64]channeldb.HTLC)
49✔
265
        for _, htlc := range htlcs {
83✔
266
                if htlc.Incoming {
43✔
267
                        inHTLCs[htlc.HtlcIndex] = htlc
9✔
268
                        continue
9✔
269
                }
270

271
                outHTLCs[htlc.HtlcIndex] = htlc
28✔
272
        }
273

274
        return htlcSet{
49✔
275
                incomingHTLCs: inHTLCs,
49✔
276
                outgoingHTLCs: outHTLCs,
49✔
277
        }
49✔
278
}
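For context, a minimal illustrative example (not part of the original file) of how newHtlcSet partitions HTLCs by direction; the index values below are made up purely for demonstration.

// exampleHtlcSet is a hypothetical demonstration of newHtlcSet: incoming
// HTLCs end up in incomingHTLCs and the rest in outgoingHTLCs, keyed by
// their HTLC index.
func exampleHtlcSet() htlcSet {
        htlcs := []channeldb.HTLC{
                {HtlcIndex: 0, Incoming: true},
                {HtlcIndex: 1, Incoming: false},
        }

        // The returned set maps index 0 under incomingHTLCs and index 1
        // under outgoingHTLCs.
        return newHtlcSet(htlcs)
}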
279

280
// HtlcSetKey is a two-tuple that uniquely identifies a set of HTLCs on a
281
// commitment transaction.
282
type HtlcSetKey struct {
283
        // IsRemote denotes if the HTLCs are on the remote commitment
284
        // transaction.
285
        IsRemote bool
286

287
        // IsPending denotes if the commitment transaction that the HTLCs are
288
        // on is pending (the higher of two unrevoked commitments).
289
        IsPending bool
290
}
291

292
var (
293
        // LocalHtlcSet is the HtlcSetKey used for local commitments.
294
        LocalHtlcSet = HtlcSetKey{IsRemote: false, IsPending: false}
295

296
        // RemoteHtlcSet is the HtlcSetKey used for remote commitments.
297
        RemoteHtlcSet = HtlcSetKey{IsRemote: true, IsPending: false}
298

299
        // RemotePendingHtlcSet is the HtlcSetKey used for dangling remote
300
        // commitment transactions.
301
        RemotePendingHtlcSet = HtlcSetKey{IsRemote: true, IsPending: true}
302
)
303

304
// String returns a human readable string describing the target HtlcSetKey.
305
func (h HtlcSetKey) String() string {
11✔
306
        switch h {
11✔
307
        case LocalHtlcSet:
7✔
308
                return "LocalHtlcSet"
7✔
309
        case RemoteHtlcSet:
5✔
310
                return "RemoteHtlcSet"
5✔
311
        case RemotePendingHtlcSet:
5✔
312
                return "RemotePendingHtlcSet"
5✔
313
        default:
×
314
                return "unknown HtlcSetKey"
×
315
        }
316
}
317

318
// ChannelArbitrator is the on-chain arbitrator for a particular channel. The
319
// struct will keep in sync with the current set of HTLCs on the commitment
320
// transaction. The job of the attendant is to go on-chain to either settle or
321
// cancel an HTLC as necessary iff: an HTLC times out, or we know the
322
// pre-image to an HTLC, but it wasn't settled by the link off-chain. The
323
// ChannelArbitrator will factor in an expected confirmation delta when
324
// broadcasting to ensure that we avoid any possibility of race conditions, and
325
// sweep the output(s) without contest.
326
type ChannelArbitrator struct {
327
        started int32 // To be used atomically.
328
        stopped int32 // To be used atomically.
329

330
        // Embed the blockbeat consumer struct to get access to the method
331
        // `NotifyBlockProcessed` and the `BlockbeatChan`.
332
        chainio.BeatConsumer
333

334
        // startTimestamp is the time when this ChannelArbitrator was started.
335
        startTimestamp time.Time
336

337
        // log is a persistent log that the attendant will use to checkpoint
338
        // its next action, and the state of any unresolved contracts.
339
        log ArbitratorLog
340

341
        // activeHTLCs is the set of active incoming/outgoing HTLC's on all
342
        // currently valid commitment transactions.
343
        activeHTLCs map[HtlcSetKey]htlcSet
344

345
        // unmergedSet is used to update the activeHTLCs map in two callsites:
346
        // checkLocalChainActions and sweepAnchors. It contains the latest
347
        // updates from the link. It is not deleted from, its entries may be
348
        // replaced on subsequent calls to notifyContractUpdate.
349
        unmergedSet map[HtlcSetKey]htlcSet
350
        unmergedMtx sync.RWMutex
351

352
        // cfg contains all the functionality that the ChannelArbitrator requires
353
        // to do its duty.
354
        cfg ChannelArbitratorConfig
355

356
        // signalUpdates is a channel over which any new live signals for the
357
        // channel we're watching over will be sent.
358
        signalUpdates chan *signalUpdateMsg
359

360
        // activeResolvers is a slice of any active resolvers. This is used to
361
        // be able to signal them for shutdown in the case that we shutdown.
362
        activeResolvers []ContractResolver
363

364
        // activeResolversLock prevents simultaneous read and write to the
365
        // resolvers slice.
366
        activeResolversLock sync.RWMutex
367

368
        // resolutionSignal is a channel that will be sent upon by contract
369
        // resolvers once their contract has been fully resolved. With each
370
        // send, we'll check to see if the contract is fully resolved.
371
        resolutionSignal chan struct{}
372

373
        // forceCloseReqs is a channel that requests to forcibly close the
374
        // contract will be sent over.
375
        forceCloseReqs chan *forceCloseReq
376

377
        // state is the current state of the arbitrator. This state is examined
378
        // upon start up to decide which actions to take.
379
        state ArbitratorState
380

381
        wg   sync.WaitGroup
382
        quit chan struct{}
383
}
384

385
// NewChannelArbitrator returns a new instance of a ChannelArbitrator backed by
386
// the passed config struct.
387
func NewChannelArbitrator(cfg ChannelArbitratorConfig,
388
        htlcSets map[HtlcSetKey]htlcSet, log ArbitratorLog) *ChannelArbitrator {
54✔
389

54✔
390
        // Create a new map for unmerged HTLC's as we will overwrite the values
54✔
391
        // and want to avoid modifying activeHTLCs directly. This soft copying
54✔
392
        // is done to ensure that activeHTLCs isn't reset as an empty map later
54✔
393
        // on.
54✔
394
        unmerged := make(map[HtlcSetKey]htlcSet)
54✔
395
        unmerged[LocalHtlcSet] = htlcSets[LocalHtlcSet]
54✔
396
        unmerged[RemoteHtlcSet] = htlcSets[RemoteHtlcSet]
54✔
397

54✔
398
        // If the pending set exists, write that as well.
54✔
399
        if _, ok := htlcSets[RemotePendingHtlcSet]; ok {
54✔
400
                unmerged[RemotePendingHtlcSet] = htlcSets[RemotePendingHtlcSet]
×
401
        }
×
402

403
        c := &ChannelArbitrator{
54✔
404
                log:              log,
54✔
405
                signalUpdates:    make(chan *signalUpdateMsg),
54✔
406
                resolutionSignal: make(chan struct{}),
54✔
407
                forceCloseReqs:   make(chan *forceCloseReq),
54✔
408
                activeHTLCs:      htlcSets,
54✔
409
                unmergedSet:      unmerged,
54✔
410
                cfg:              cfg,
54✔
411
                quit:             make(chan struct{}),
54✔
412
        }
54✔
413

54✔
414
        // Mount the block consumer.
54✔
415
        c.BeatConsumer = chainio.NewBeatConsumer(c.quit, c.Name())
54✔
416

54✔
417
        return c
54✔
418
}
419

420
// Compile-time check for the chainio.Consumer interface.
421
var _ chainio.Consumer = (*ChannelArbitrator)(nil)
422

423
// chanArbStartState contains the information from disk that we need to start
424
// up a channel arbitrator.
425
type chanArbStartState struct {
426
        currentState ArbitratorState
427
        commitSet    *CommitSet
428
}
429

430
// getStartState retrieves the information from disk that our channel arbitrator
431
// requires to start.
432
func (c *ChannelArbitrator) getStartState(tx kvdb.RTx) (*chanArbStartState,
433
        error) {
51✔
434

51✔
435
        // First, we'll read our last state from disk, so our internal state
51✔
436
        // machine can act accordingly.
51✔
437
        state, err := c.log.CurrentState(tx)
51✔
438
        if err != nil {
51✔
439
                return nil, err
×
440
        }
×
441

442
        // Next we'll fetch our confirmed commitment set. This will only exist
443
        // if the channel has been closed out on chain for modern nodes. For
444
        // older nodes, this won't be found at all, and will rely on the
445
        // existing written chain actions. Additionally, if this channel hasn't
446
        // logged any actions in the log, then this field won't be present.
447
        commitSet, err := c.log.FetchConfirmedCommitSet(tx)
51✔
448
        if err != nil && err != errNoCommitSet && err != errScopeBucketNoExist {
51✔
449
                return nil, err
×
450
        }
×
451

452
        return &chanArbStartState{
51✔
453
                currentState: state,
51✔
454
                commitSet:    commitSet,
51✔
455
        }, nil
51✔
456
}
457

458
// Start starts all the goroutines that the ChannelArbitrator needs to operate.
459
// If takes a start state, which will be looked up on disk if it is not
460
// provided.
461
func (c *ChannelArbitrator) Start(state *chanArbStartState,
462
        beat chainio.Blockbeat) error {
51✔
463

51✔
464
        if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
51✔
465
                return nil
×
466
        }
×
467
        c.startTimestamp = c.cfg.Clock.Now()
51✔
468

51✔
469
        // If the state passed in is nil, we look it up now.
51✔
470
        if state == nil {
91✔
471
                var err error
40✔
472
                state, err = c.getStartState(nil)
40✔
473
                if err != nil {
40✔
474
                        return err
×
475
                }
×
476
        }
477

478
        log.Tracef("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
51✔
479
                c.cfg.ChanPoint, lnutils.SpewLogClosure(c.activeHTLCs),
51✔
480
                state.currentState)
51✔
481

51✔
482
        // Set our state from our starting state.
51✔
483
        c.state = state.currentState
51✔
484

51✔
485
        // Get the starting height.
51✔
486
        bestHeight := beat.Height()
51✔
487

51✔
488
        c.wg.Add(1)
51✔
489
        go c.channelAttendant(bestHeight, state.commitSet)
51✔
490

51✔
491
        return nil
51✔
492
}
493

494
// progressStateMachineAfterRestart attempts to progress the state machine
495
// after a restart. This makes sure that if the state transition failed, we
496
// will try to progress the state machine again. Moreover it will relaunch
497
// resolvers if the channel is still in the pending close state and has not
498
// been fully resolved yet.
499
func (c *ChannelArbitrator) progressStateMachineAfterRestart(bestHeight int32,
500
        commitSet *CommitSet) error {
51✔
501

51✔
502
        // If the channel has been marked pending close in the database, and we
51✔
503
        // haven't transitioned the state machine to StateContractClosed (or a
51✔
504
        // succeeding state), then a state transition most likely failed. We'll
51✔
505
        // try to recover from this by manually advancing the state by setting
51✔
506
        // the corresponding close trigger.
51✔
507
        trigger := chainTrigger
51✔
508
        triggerHeight := uint32(bestHeight)
51✔
509
        if c.cfg.IsPendingClose {
59✔
510
                switch c.state {
8✔
511
                case StateDefault:
4✔
512
                        fallthrough
4✔
513
                case StateBroadcastCommit:
5✔
514
                        fallthrough
5✔
515
                case StateCommitmentBroadcasted:
5✔
516
                        switch c.cfg.CloseType {
5✔
517

518
                        case channeldb.CooperativeClose:
1✔
519
                                trigger = coopCloseTrigger
1✔
520

521
                        case channeldb.BreachClose:
1✔
522
                                trigger = breachCloseTrigger
1✔
523

524
                        case channeldb.LocalForceClose:
1✔
525
                                trigger = localCloseTrigger
1✔
526

527
                        case channeldb.RemoteForceClose:
2✔
528
                                trigger = remoteCloseTrigger
2✔
529
                        }
530

531
                        log.Warnf("ChannelArbitrator(%v): detected stalled "+
5✔
532
                                "state=%v for closed channel",
5✔
533
                                c.cfg.ChanPoint, c.state)
5✔
534
                }
535

536
                triggerHeight = c.cfg.ClosingHeight
8✔
537
        }
538

539
        log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+
51✔
540
                "triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger,
51✔
541
                triggerHeight)
51✔
542

51✔
543
        // We'll now attempt to advance our state forward based on the current
51✔
544
        // on-chain state, and our set of active contracts.
51✔
545
        startingState := c.state
51✔
546
        nextState, _, err := c.advanceState(
51✔
547
                triggerHeight, trigger, commitSet,
51✔
548
        )
51✔
549
        if err != nil {
53✔
550
                switch err {
2✔
551

552
                // If we detect that we tried to fetch resolutions, but failed,
553
                // this channel was marked closed in the database before
554
                // resolutions were successfully written. In this case there is not
555
                // much we can do, so we don't return the error.
556
                case errScopeBucketNoExist:
×
557
                        fallthrough
×
558
                case errNoResolutions:
1✔
559
                        log.Warnf("ChannelArbitrator(%v): detected closed"+
1✔
560
                                "channel with no contract resolutions written.",
1✔
561
                                c.cfg.ChanPoint)
1✔
562

563
                default:
1✔
564
                        return err
1✔
565
                }
566
        }
567

568
        // If we start and ended at the awaiting full resolution state, then
569
        // we'll relaunch our set of unresolved contracts.
570
        if startingState == StateWaitingFullResolution &&
50✔
571
                nextState == StateWaitingFullResolution {
54✔
572

4✔
573
                // In order to relaunch the resolvers, we'll need to fetch the
4✔
574
                // set of HTLCs that were present in the commitment transaction
4✔
575
                // at the time it was confirmed. commitSet.ConfCommitKey can't
4✔
576
                // be nil at this point since we're in
4✔
577
                // StateWaitingFullResolution. We can only be in
4✔
578
                // StateWaitingFullResolution after we've transitioned from
4✔
579
                // StateContractClosed which can only be triggered by the local
4✔
580
                // or remote close trigger. This trigger is only fired when we
4✔
581
                // receive a chain event from the chain watcher that the
4✔
582
                // commitment has been confirmed on chain, and before we
4✔
583
                // advance our state step, we call InsertConfirmedCommitSet.
4✔
584
                err := c.relaunchResolvers(commitSet, triggerHeight)
4✔
585
                if err != nil {
4✔
586
                        return err
×
587
                }
×
588
        }
589

590
        return nil
50✔
591
}
592

593
// maybeAugmentTaprootResolvers will update the contract resolution information
594
// for taproot channels. This ensures that all the resolvers have the latest
595
// resolution, which may also include data such as the control block and tap
596
// tweaks.
597
func maybeAugmentTaprootResolvers(chanType channeldb.ChannelType,
598
        resolver ContractResolver,
599
        contractResolutions *ContractResolutions) {
4✔
600

4✔
601
        if !chanType.IsTaproot() {
8✔
602
                return
4✔
603
        }
4✔
604

605
        // The on-disk resolutions contain all the ctrl block
606
        // information, so we'll set that now for the relevant
607
        // resolvers.
608
        switch r := resolver.(type) {
3✔
609
        case *commitSweepResolver:
2✔
610
                if contractResolutions.CommitResolution != nil {
4✔
611
                        //nolint:ll
2✔
612
                        r.commitResolution = *contractResolutions.CommitResolution
2✔
613
                }
2✔
614
        case *htlcOutgoingContestResolver:
3✔
615
                //nolint:ll
3✔
616
                htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
3✔
617
                for _, htlcRes := range htlcResolutions {
6✔
618
                        htlcRes := htlcRes
3✔
619

3✔
620
                        if r.htlcResolution.ClaimOutpoint ==
3✔
621
                                htlcRes.ClaimOutpoint {
6✔
622

3✔
623
                                r.htlcResolution = htlcRes
3✔
624
                        }
3✔
625
                }
626

627
        case *htlcTimeoutResolver:
2✔
628
                //nolint:ll
2✔
629
                htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
2✔
630
                for _, htlcRes := range htlcResolutions {
4✔
631
                        htlcRes := htlcRes
2✔
632

2✔
633
                        if r.htlcResolution.ClaimOutpoint ==
2✔
634
                                htlcRes.ClaimOutpoint {
4✔
635

2✔
636
                                r.htlcResolution = htlcRes
2✔
637
                        }
2✔
638
                }
639

640
        case *htlcIncomingContestResolver:
3✔
641
                //nolint:ll
3✔
642
                htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
3✔
643
                for _, htlcRes := range htlcResolutions {
6✔
644
                        htlcRes := htlcRes
3✔
645

3✔
646
                        if r.htlcResolution.ClaimOutpoint ==
3✔
647
                                htlcRes.ClaimOutpoint {
6✔
648

3✔
649
                                r.htlcResolution = htlcRes
3✔
650
                        }
3✔
651
                }
652
        case *htlcSuccessResolver:
×
653
                //nolint:ll
×
654
                htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
×
655
                for _, htlcRes := range htlcResolutions {
×
656
                        htlcRes := htlcRes
×
657

×
658
                        if r.htlcResolution.ClaimOutpoint ==
×
659
                                htlcRes.ClaimOutpoint {
×
660

×
661
                                r.htlcResolution = htlcRes
×
662
                        }
×
663
                }
664
        }
665
}
666

667
// relaunchResolvers relaunches the set of resolvers for unresolved contracts in
668
// order to provide them with information that's not immediately available upon
669
// starting the ChannelArbitrator. This information should ideally be stored in
670
// the database, so this only serves as an intermediate work-around to prevent a
671
// migration.
672
func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
673
        heightHint uint32) error {
4✔
674

4✔
675
        // We'll now query our log to see if there are any active unresolved
4✔
676
        // contracts. If this is the case, then we'll relaunch all contract
4✔
677
        // resolvers.
4✔
678
        unresolvedContracts, err := c.log.FetchUnresolvedContracts()
4✔
679
        if err != nil {
4✔
680
                return err
×
681
        }
×
682

683
        // Retrieve the commitment tx hash from the log.
684
        contractResolutions, err := c.log.FetchContractResolutions()
4✔
685
        if err != nil {
4✔
686
                log.Errorf("unable to fetch contract resolutions: %v",
×
687
                        err)
×
688
                return err
×
689
        }
×
690
        commitHash := contractResolutions.CommitHash
4✔
691

4✔
692
        // In prior versions of lnd, the information needed to supplement the
4✔
693
        // resolvers (in most cases, the full amount of the HTLC) was found in
4✔
694
        // the chain action map, which is now deprecated.  As a result, if the
4✔
695
        // commitSet is nil (an older node with unresolved HTLCs at time of
4✔
696
        // upgrade), then we'll use the chain action information in place. The
4✔
697
        // chain actions may exclude some information, but we cannot recover it
4✔
698
        // for these older nodes at the moment.
4✔
699
        var confirmedHTLCs []channeldb.HTLC
4✔
700
        if commitSet != nil && commitSet.ConfCommitKey.IsSome() {
8✔
701
                confCommitKey, err := commitSet.ConfCommitKey.UnwrapOrErr(
4✔
702
                        fmt.Errorf("no commitKey available"),
4✔
703
                )
4✔
704
                if err != nil {
4✔
705
                        return err
×
706
                }
×
707
                confirmedHTLCs = commitSet.HtlcSets[confCommitKey]
4✔
708
        } else {
×
709
                chainActions, err := c.log.FetchChainActions()
×
710
                if err != nil {
×
711
                        log.Errorf("unable to fetch chain actions: %v", err)
×
712
                        return err
×
713
                }
×
714
                for _, htlcs := range chainActions {
×
715
                        confirmedHTLCs = append(confirmedHTLCs, htlcs...)
×
716
                }
×
717
        }
718

719
        // Reconstruct the htlc outpoints and data from the chain action log.
720
        // The purpose of the constructed htlc map is to supplement the
721
        // resolvers restored from database with extra data. Ideally this data
722
        // is stored as part of the resolver in the log. This is a workaround
723
        // to prevent a db migration. We use all available htlc sets here in
724
        // order to ensure we have complete coverage.
725
        htlcMap := make(map[wire.OutPoint]*channeldb.HTLC)
4✔
726
        for _, htlc := range confirmedHTLCs {
10✔
727
                htlc := htlc
6✔
728
                outpoint := wire.OutPoint{
6✔
729
                        Hash:  commitHash,
6✔
730
                        Index: uint32(htlc.OutputIndex),
6✔
731
                }
6✔
732
                htlcMap[outpoint] = &htlc
6✔
733
        }
6✔
734

735
        // We'll also fetch the historical state of this channel, as it should
736
        // have been marked as closed by now, and supplement it to each resolver
737
        // such that we can properly resolve our pending contracts.
738
        var chanState *channeldb.OpenChannel
4✔
739
        chanState, err = c.cfg.FetchHistoricalChannel()
4✔
740
        switch {
4✔
741
        // If we don't find this channel, then it may be the case that it
742
        // was closed before we started to retain the final state
743
        // information for open channels.
744
        case err == channeldb.ErrNoHistoricalBucket:
×
745
                fallthrough
×
746
        case err == channeldb.ErrChannelNotFound:
×
747
                log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
×
748
                        "state", c.cfg.ChanPoint)
×
749

750
        case err != nil:
×
751
                return err
×
752
        }
753

754
        log.Infof("ChannelArbitrator(%v): relaunching %v contract "+
4✔
755
                "resolvers", c.cfg.ChanPoint, len(unresolvedContracts))
4✔
756

4✔
757
        for i := range unresolvedContracts {
8✔
758
                resolver := unresolvedContracts[i]
4✔
759

4✔
760
                if chanState != nil {
8✔
761
                        resolver.SupplementState(chanState)
4✔
762

4✔
763
                        // For taproot channels, we'll need to also make sure
4✔
764
                        // the control block information was set properly.
4✔
765
                        maybeAugmentTaprootResolvers(
4✔
766
                                chanState.ChanType, resolver,
4✔
767
                                contractResolutions,
4✔
768
                        )
4✔
769
                }
4✔
770

771
                unresolvedContracts[i] = resolver
4✔
772

4✔
773
                htlcResolver, ok := resolver.(htlcContractResolver)
4✔
774
                if !ok {
7✔
775
                        continue
3✔
776
                }
777

778
                htlcPoint := htlcResolver.HtlcPoint()
4✔
779
                htlc, ok := htlcMap[htlcPoint]
4✔
780
                if !ok {
4✔
781
                        return fmt.Errorf(
×
782
                                "htlc resolver %T unavailable", resolver,
×
783
                        )
×
784
                }
×
785

786
                htlcResolver.Supplement(*htlc)
4✔
787

4✔
788
                // If this is an outgoing HTLC, we will also need to supplement
4✔
789
                // the resolver with the expiry block height of its
4✔
790
                // corresponding incoming HTLC.
4✔
791
                if !htlc.Incoming {
8✔
792
                        deadline := c.cfg.FindOutgoingHTLCDeadline(*htlc)
4✔
793
                        htlcResolver.SupplementDeadline(deadline)
4✔
794
                }
4✔
795
        }
796

797
        // The anchor resolver is stateless and can always be re-instantiated.
798
        if contractResolutions.AnchorResolution != nil {
7✔
799
                anchorResolver := newAnchorResolver(
3✔
800
                        contractResolutions.AnchorResolution.AnchorSignDescriptor,
3✔
801
                        contractResolutions.AnchorResolution.CommitAnchor,
3✔
802
                        heightHint, c.cfg.ChanPoint,
3✔
803
                        ResolverConfig{
3✔
804
                                ChannelArbitratorConfig: c.cfg,
3✔
805
                        },
3✔
806
                )
3✔
807

3✔
808
                anchorResolver.SupplementState(chanState)
3✔
809

3✔
810
                unresolvedContracts = append(unresolvedContracts, anchorResolver)
3✔
811

3✔
812
                // TODO(roasbeef): this isn't re-launched?
3✔
813
        }
3✔
814

815
        c.resolveContracts(unresolvedContracts)
4✔
816

4✔
817
        return nil
4✔
818
}
819

820
// Report returns htlc reports for the active resolvers.
821
func (c *ChannelArbitrator) Report() []*ContractReport {
3✔
822
        c.activeResolversLock.RLock()
3✔
823
        defer c.activeResolversLock.RUnlock()
3✔
824

3✔
825
        var reports []*ContractReport
3✔
826
        for _, resolver := range c.activeResolvers {
6✔
827
                r, ok := resolver.(reportingContractResolver)
3✔
828
                if !ok {
3✔
829
                        continue
×
830
                }
831

832
                report := r.report()
3✔
833
                if report == nil {
6✔
834
                        continue
3✔
835
                }
836

837
                reports = append(reports, report)
3✔
838
        }
839

840
        return reports
3✔
841
}
842

843
// Stop signals the ChannelArbitrator for a graceful shutdown.
844
func (c *ChannelArbitrator) Stop() error {
54✔
845
        if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
60✔
846
                return nil
6✔
847
        }
6✔
848

849
        log.Debugf("Stopping ChannelArbitrator(%v)", c.cfg.ChanPoint)
48✔
850

48✔
851
        if c.cfg.ChainEvents.Cancel != nil {
62✔
852
                go c.cfg.ChainEvents.Cancel()
14✔
853
        }
14✔
854

855
        c.activeResolversLock.RLock()
48✔
856
        for _, activeResolver := range c.activeResolvers {
57✔
857
                activeResolver.Stop()
9✔
858
        }
9✔
859
        c.activeResolversLock.RUnlock()
48✔
860

48✔
861
        close(c.quit)
48✔
862
        c.wg.Wait()
48✔
863

48✔
864
        return nil
48✔
865
}
866

867
// transitionTrigger is an enum that denotes exactly *why* a state transition
868
// was initiated. This is useful as depending on the initial trigger, we may
869
// skip certain states as those actions are expected to have already taken
870
// place as a result of the external trigger.
871
type transitionTrigger uint8
872

873
const (
874
        // chainTrigger is a transition trigger that has been attempted due to
875
        // changing on-chain conditions, such as a new block being attached
876
        // which times out HTLCs.
877
        chainTrigger transitionTrigger = iota
878

879
        // userTrigger is a transition trigger driven by user action. Examples
880
        // of such a trigger include a user requesting a force closure of the
881
        // channel.
882
        userTrigger
883

884
        // remoteCloseTrigger is a transition trigger driven by the remote
885
        // peer's commitment being confirmed.
886
        remoteCloseTrigger
887

888
        // localCloseTrigger is a transition trigger driven by our commitment
889
        // being confirmed.
890
        localCloseTrigger
891

892
        // coopCloseTrigger is a transition trigger driven by a cooperative
893
        // close transaction being confirmed.
894
        coopCloseTrigger
895

896
        // breachCloseTrigger is a transition trigger driven by a remote breach
897
        // being confirmed. In this case the channel arbitrator will wait for
898
        // the BreachArbitrator to finish and then clean up gracefully.
899
        breachCloseTrigger
900
)
901

902
// String returns a human readable string describing the passed
903
// transitionTrigger.
904
func (t transitionTrigger) String() string {
3✔
905
        switch t {
3✔
906
        case chainTrigger:
3✔
907
                return "chainTrigger"
3✔
908

909
        case remoteCloseTrigger:
3✔
910
                return "remoteCloseTrigger"
3✔
911

912
        case userTrigger:
3✔
913
                return "userTrigger"
3✔
914

915
        case localCloseTrigger:
3✔
916
                return "localCloseTrigger"
3✔
917

918
        case coopCloseTrigger:
3✔
919
                return "coopCloseTrigger"
3✔
920

921
        case breachCloseTrigger:
3✔
922
                return "breachCloseTrigger"
3✔
923

924
        default:
×
925
                return "unknown trigger"
×
926
        }
927
}
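For reference, a hypothetical helper (not part of this file) that captures the same closure-type-to-trigger mapping used in progressStateMachineAfterRestart above:

// closeTypeToTrigger mirrors the switch in progressStateMachineAfterRestart,
// converting a recorded closure type into the transition trigger used to
// re-drive the state machine. Unknown close types fall back to chainTrigger.
func closeTypeToTrigger(closeType channeldb.ClosureType) transitionTrigger {
        switch closeType {
        case channeldb.CooperativeClose:
                return coopCloseTrigger
        case channeldb.BreachClose:
                return breachCloseTrigger
        case channeldb.LocalForceClose:
                return localCloseTrigger
        case channeldb.RemoteForceClose:
                return remoteCloseTrigger
        default:
                return chainTrigger
        }
}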
928

929
// stateStep is a helper method that examines our internal state, and attempts
930
// the appropriate state transition if necessary. The next state we transition
931
// to is returned. Additionally, if the next transition results in a commitment
932
// broadcast, the commitment transaction itself is returned.
933
func (c *ChannelArbitrator) stateStep(
934
        triggerHeight uint32, trigger transitionTrigger,
935
        confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
177✔
936

177✔
937
        var (
177✔
938
                nextState ArbitratorState
177✔
939
                closeTx   *wire.MsgTx
177✔
940
        )
177✔
941
        switch c.state {
177✔
942

943
        // If we're in the default state, then we'll check our set of actions
944
        // to see if while we were down, conditions have changed.
945
        case StateDefault:
66✔
946
                log.Debugf("ChannelArbitrator(%v): examining active HTLCs in "+
66✔
947
                        "block %v, confCommitSet: %v", c.cfg.ChanPoint,
66✔
948
                        triggerHeight, lnutils.LogClosure(confCommitSet.String))
66✔
949

66✔
950
                // As a new block has been connected to the end of the main
66✔
951
                // chain, we'll check to see if we need to make any on-chain
66✔
952
                // claims on behalf of the channel contract that we're
66✔
953
                // arbitrating for. If a commitment has confirmed, then we'll
66✔
954
                // use the set snapshot from the chain, otherwise we'll use our
66✔
955
                // current set.
66✔
956
                var (
66✔
957
                        chainActions ChainActionMap
66✔
958
                        err          error
66✔
959
                )
66✔
960

66✔
961
                // Normally if we force close the channel locally we will have
66✔
962
                // no confCommitSet. However when the remote commitment confirms
66✔
963
                // without us ever broadcasting our local commitment we need to
66✔
964
                // make sure we cancel all upstream HTLCs for outgoing dust
66✔
965
                // HTLCs as well hence we need to fetch the chain actions here
66✔
966
                // as well.
66✔
967
                if confCommitSet == nil {
123✔
968
                        // Update the set of activeHTLCs so
57✔
969
                        // checkLocalChainActions has an up-to-date view of the
57✔
970
                        // commitments.
57✔
971
                        c.updateActiveHTLCs()
57✔
972
                        htlcs := c.activeHTLCs
57✔
973
                        chainActions, err = c.checkLocalChainActions(
57✔
974
                                triggerHeight, trigger, htlcs, false,
57✔
975
                        )
57✔
976
                        if err != nil {
57✔
977
                                return StateDefault, nil, err
×
978
                        }
×
979
                } else {
12✔
980
                        chainActions, err = c.constructChainActions(
12✔
981
                                confCommitSet, triggerHeight, trigger,
12✔
982
                        )
12✔
983
                        if err != nil {
12✔
984
                                return StateDefault, nil, err
×
985
                        }
×
986
                }
987

988
                // If there are no actions to be made, then we'll remain in the
989
                // default state. If this isn't a self initiated event (we're
990
                // checking due to a chain update), then we'll exit now.
991
                if len(chainActions) == 0 && trigger == chainTrigger {
105✔
992
                        log.Debugf("ChannelArbitrator(%v): no actions for "+
39✔
993
                                "chain trigger, terminating", c.cfg.ChanPoint)
39✔
994

39✔
995
                        return StateDefault, closeTx, nil
39✔
996
                }
39✔
997

998
                // Otherwise, we'll log that we checked the HTLC actions as the
999
                // commitment transaction has already been broadcast.
1000
                log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v",
30✔
1001
                        c.cfg.ChanPoint, lnutils.SpewLogClosure(chainActions))
30✔
1002

30✔
1003
                // Cancel upstream HTLCs for all outgoing dust HTLCs available
30✔
1004
                // either on the local or the remote/remote pending commitment
30✔
1005
                // transaction.
30✔
1006
                dustHTLCs := chainActions[HtlcFailDustAction]
30✔
1007
                if len(dustHTLCs) > 0 {
34✔
1008
                        log.Debugf("ChannelArbitrator(%v): canceling %v dust "+
4✔
1009
                                "HTLCs backwards", c.cfg.ChanPoint,
4✔
1010
                                len(dustHTLCs))
4✔
1011

4✔
1012
                        getIdx := func(htlc channeldb.HTLC) uint64 {
8✔
1013
                                return htlc.HtlcIndex
4✔
1014
                        }
4✔
1015
                        dustHTLCSet := fn.NewSet(fn.Map(dustHTLCs, getIdx)...)
4✔
1016
                        err = c.abandonForwards(dustHTLCSet)
4✔
1017
                        if err != nil {
4✔
1018
                                return StateError, closeTx, err
×
1019
                        }
×
1020
                }
1021

1022
                // Depending on the type of trigger, we'll either "tunnel"
1023
                // through to a farther state, or just proceed linearly to the
1024
                // next state.
1025
                switch trigger {
30✔
1026

1027
                // If this is a chain trigger, then we'll go straight to the
1028
                // next state, as we still need to broadcast the commitment
1029
                // transaction.
1030
                case chainTrigger:
8✔
1031
                        fallthrough
8✔
1032
                case userTrigger:
18✔
1033
                        nextState = StateBroadcastCommit
18✔
1034

1035
                // If the trigger is a cooperative close being confirmed, then
1036
                // we can go straight to StateFullyResolved, as there won't be
1037
                // any contracts to resolve.
1038
                case coopCloseTrigger:
6✔
1039
                        nextState = StateFullyResolved
6✔
1040

1041
                // Otherwise, if this state advance was triggered by a
1042
                // commitment being confirmed on chain, then we'll jump
1043
                // straight to the state where the contract has already been
1044
                // closed, and we will inspect the set of unresolved contracts.
1045
                case localCloseTrigger:
5✔
1046
                        log.Errorf("ChannelArbitrator(%v): unexpected local "+
5✔
1047
                                "commitment confirmed while in StateDefault",
5✔
1048
                                c.cfg.ChanPoint)
5✔
1049
                        fallthrough
5✔
1050
                case remoteCloseTrigger:
11✔
1051
                        nextState = StateContractClosed
11✔
1052

1053
                case breachCloseTrigger:
4✔
1054
                        nextContractState, err := c.checkLegacyBreach()
4✔
1055
                        if nextContractState == StateError {
4✔
1056
                                return nextContractState, nil, err
×
1057
                        }
×
1058

1059
                        nextState = nextContractState
4✔
1060
                }
1061

1062
        // If we're in this state, then we've decided to broadcast the
1063
        // commitment transaction. We enter this state either due to an outside
1064
        // sub-system, or because an on-chain action has been triggered.
1065
        case StateBroadcastCommit:
23✔
1066
                // Under normal operation, we can only enter
23✔
1067
                // StateBroadcastCommit via a user or chain trigger. On restart,
23✔
1068
                // this state may be reexecuted after closing the channel, but
23✔
1069
                // failing to commit to StateContractClosed or
23✔
1070
                // StateFullyResolved. In that case, one of the four close
23✔
1071
                // triggers will be presented, signifying that we should skip
23✔
1072
                // rebroadcasting, and go straight to resolving the on-chain
23✔
1073
                // contract or marking the channel resolved.
23✔
1074
                switch trigger {
23✔
1075
                case localCloseTrigger, remoteCloseTrigger:
×
1076
                        log.Infof("ChannelArbitrator(%v): detected %s "+
×
1077
                                "close after closing channel, fast-forwarding "+
×
1078
                                "to %s to resolve contract",
×
1079
                                c.cfg.ChanPoint, trigger, StateContractClosed)
×
1080
                        return StateContractClosed, closeTx, nil
×
1081

1082
                case breachCloseTrigger:
4✔
1083
                        nextContractState, err := c.checkLegacyBreach()
4✔
1084
                        if nextContractState == StateError {
4✔
1085
                                log.Infof("ChannelArbitrator(%v): unable to "+
×
1086
                                        "advance breach close resolution: %v",
×
1087
                                        c.cfg.ChanPoint, nextContractState)
×
1088
                                return StateError, closeTx, err
×
1089
                        }
×
1090

1091
                        log.Infof("ChannelArbitrator(%v): detected %s close "+
4✔
1092
                                "after closing channel, fast-forwarding to %s"+
4✔
1093
                                " to resolve contract", c.cfg.ChanPoint,
4✔
1094
                                trigger, nextContractState)
4✔
1095

4✔
1096
                        return nextContractState, closeTx, nil
4✔
1097

1098
                case coopCloseTrigger:
×
1099
                        log.Infof("ChannelArbitrator(%v): detected %s "+
×
1100
                                "close after closing channel, fast-forwarding "+
×
1101
                                "to %s to resolve contract",
×
1102
                                c.cfg.ChanPoint, trigger, StateFullyResolved)
×
1103
                        return StateFullyResolved, closeTx, nil
×
1104
                }
1105

1106
                log.Infof("ChannelArbitrator(%v): force closing "+
22✔
1107
                        "chan", c.cfg.ChanPoint)
22✔
1108

22✔
1109
                // Now that we have all the actions decided for the set of
22✔
1110
                // HTLC's, we'll broadcast the commitment transaction, and
22✔
1111
                // signal the link to exit.
22✔
1112

22✔
1113
                // We'll tell the switch that it should remove the link for
22✔
1114
                // this channel, in addition to fetching the force close
22✔
1115
                // summary needed to close this channel on chain.
22✔
1116
                forceCloseTx, err := c.cfg.Channel.ForceCloseChan()
22✔
1117
                if err != nil {
23✔
1118
                        log.Errorf("ChannelArbitrator(%v): unable to "+
1✔
1119
                                "force close: %v", c.cfg.ChanPoint, err)
1✔
1120

1✔
1121
                        // We tried to force close (HTLC may be expiring from
1✔
1122
                        // our PoV, etc), but we think we've lost data. In this
1✔
1123
                        // case, we'll not force close, but terminate the state
1✔
1124
                        // machine here to wait to see what confirms on chain.
1✔
1125
                        if errors.Is(err, lnwallet.ErrForceCloseLocalDataLoss) {
2✔
1126
                                log.Errorf("ChannelArbitrator(%v): broadcast "+
1✔
1127
                                        "failed due to local data loss, "+
1✔
1128
                                        "waiting for on chain confirmation...",
1✔
1129
                                        c.cfg.ChanPoint)
1✔
1130

1✔
1131
                                return StateBroadcastCommit, nil, nil
1✔
1132
                        }
1✔
1133

1134
                        return StateError, closeTx, err
×
1135
                }
1136
                closeTx = forceCloseTx
21✔
1137

21✔
1138
                // Before publishing the transaction, we store it to the
21✔
1139
                // database, such that we can re-publish later in case it
21✔
1140
                // didn't propagate. We initiated the force close, so we
21✔
1141
                // mark broadcast with local initiator set to true.
21✔
1142
                err = c.cfg.MarkCommitmentBroadcasted(closeTx, lntypes.Local)
21✔
1143
                if err != nil {
21✔
1144
                        log.Errorf("ChannelArbitrator(%v): unable to "+
×
1145
                                "mark commitment broadcasted: %v",
×
1146
                                c.cfg.ChanPoint, err)
×
1147
                        return StateError, closeTx, err
×
1148
                }
×
1149

1150
                // With the close transaction in hand, broadcast the
1151
                // transaction to the network, thereby entering the post
1152
                // channel resolution state.
1153
                log.Infof("Broadcasting force close transaction %v, "+
21✔
1154
                        "ChannelPoint(%v): %v", closeTx.TxHash(),
21✔
1155
                        c.cfg.ChanPoint, lnutils.SpewLogClosure(closeTx))
21✔
1156

21✔
1157
                // At this point, we'll now broadcast the commitment
21✔
1158
                // transaction itself.
21✔
1159
                label := labels.MakeLabel(
21✔
1160
                        labels.LabelTypeChannelClose, &c.cfg.ShortChanID,
21✔
1161
                )
21✔
1162
                if err := c.cfg.PublishTx(closeTx, label); err != nil {
29✔
1163
                        log.Errorf("ChannelArbitrator(%v): unable to broadcast "+
8✔
1164
                                "close tx: %v", c.cfg.ChanPoint, err)
8✔
1165

8✔
1166
                        // This makes sure we don't fail at startup if the
8✔
1167
                        // commitment transaction has too low fees to make it
8✔
1168
                        // into mempool. The rebroadcaster makes sure this
8✔
1169
                        // transaction is republished regularly until confirmed
8✔
1170
                        // or replaced.
8✔
1171
                        if !errors.Is(err, lnwallet.ErrDoubleSpend) &&
8✔
1172
                                !errors.Is(err, lnwallet.ErrMempoolFee) {
13✔
1173

5✔
1174
                                return StateError, closeTx, err
5✔
1175
                        }
5✔
1176
                }
1177

1178
                // We go to the StateCommitmentBroadcasted state, where we'll
1179
                // be waiting for the commitment to be confirmed.
1180
                nextState = StateCommitmentBroadcasted
19✔
1181

1182
        // In this state we have broadcasted our own commitment, and will need
1183
        // to wait for a commitment (not necessarily the one we broadcasted!)
1184
        // to be confirmed.
1185
        case StateCommitmentBroadcasted:
33✔
1186
                switch trigger {
33✔
1187

1188
                // We are waiting for a commitment to be confirmed.
1189
                case chainTrigger, userTrigger:
20✔
1190
                        // The commitment transaction has been broadcast, but it
20✔
1191
                        // doesn't necessarily need to be the commitment
20✔
1192
                        // transaction version that is going to be confirmed. To
20✔
1193
                        // be sure that any of those versions can be anchored
20✔
1194
                        // down, we now submit all anchor resolutions to the
20✔
1195
                        // sweeper. The sweeper will keep trying to sweep all of
20✔
1196
                        // them.
20✔
1197
                        //
20✔
1198
                        // Note that the sweeper is idempotent. If we ever
20✔
1199
                        // happen to end up at this point in the code again, no
20✔
1200
                        // harm is done by re-offering the anchors to the
20✔
1201
                        // sweeper.
20✔
1202
                        anchors, err := c.cfg.Channel.NewAnchorResolutions()
20✔
1203
                        if err != nil {
20✔
1204
                                return StateError, closeTx, err
×
1205
                        }
×
1206

1207
                        err = c.sweepAnchors(anchors, triggerHeight)
20✔
1208
                        if err != nil {
20✔
1209
                                return StateError, closeTx, err
×
1210
                        }
×
1211

1212
                        nextState = StateCommitmentBroadcasted
20✔
1213

1214
                // If this state advance was triggered by any of the
1215
                // commitments being confirmed, then we'll jump to the state
1216
                // where the contract has been closed.
1217
                case localCloseTrigger, remoteCloseTrigger:
16✔
1218
                        nextState = StateContractClosed
16✔
1219

1220
                // If a coop close was confirmed, jump straight to the fully
1221
                // resolved state.
1222
                case coopCloseTrigger:
×
1223
                        nextState = StateFullyResolved
×
1224

1225
                case breachCloseTrigger:
×
1226
                        nextContractState, err := c.checkLegacyBreach()
×
1227
                        if nextContractState == StateError {
×
1228
                                return nextContractState, closeTx, err
×
1229
                        }
×
1230

1231
                        nextState = nextContractState
×
1232
                }
1233

1234
                log.Infof("ChannelArbitrator(%v): trigger %v moving from "+
33✔
1235
                        "state %v to %v", c.cfg.ChanPoint, trigger, c.state,
33✔
1236
                        nextState)
33✔
1237

1238
        // If we're in this state, then the contract has been fully closed to
1239
        // outside sub-systems, so we'll process the prior set of on-chain
1240
        // contract actions and launch a set of resolvers.
1241
        case StateContractClosed:
25✔
1242
                // First, we'll fetch our chain actions, and both sets of
25✔
1243
                // resolutions so we can process them.
25✔
1244
                contractResolutions, err := c.log.FetchContractResolutions()
25✔
1245
                if err != nil {
27✔
1246
                        log.Errorf("unable to fetch contract resolutions: %v",
2✔
1247
                                err)
2✔
1248
                        return StateError, closeTx, err
2✔
1249
                }
2✔
1250

1251
                // If the resolution is empty, and we have no HTLCs at all to
1252
                // act on, then we're done here. We don't need to launch any
1253
                // resolvers, and can go straight to our final state.
1254
                if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() {
34✔
1255
                        log.Infof("ChannelArbitrator(%v): contract "+
11✔
1256
                                "resolutions empty, marking channel as fully resolved!",
11✔
1257
                                c.cfg.ChanPoint)
11✔
1258
                        nextState = StateFullyResolved
11✔
1259
                        break
11✔
1260
                }
1261

1262
                // First, we'll reconstruct a fresh set of chain actions as the
1263
                // set of actions we need to act on may differ based on if it
1264
                // was our commitment or their commitment that hit the chain.
1265
                htlcActions, err := c.constructChainActions(
15✔
1266
                        confCommitSet, triggerHeight, trigger,
15✔
1267
                )
15✔
1268
                if err != nil {
15✔
1269
                        return StateError, closeTx, err
×
1270
                }
×
1271

1272
                // In case it's a breach transaction, we fail back all upstream
1273
                // HTLCs for their corresponding outgoing HTLCs on the remote
1274
                // commitment set (remote and remote pending set).
1275
                if contractResolutions.BreachResolution != nil {
20✔
1276
                        // cancelBreachedHTLCs is a set which holds HTLCs whose
5✔
1277
                        // corresponding incoming HTLCs will be failed back
5✔
1278
                        // because the peer broadcasted an old state.
5✔
1279
                        cancelBreachedHTLCs := fn.NewSet[uint64]()
5✔
1280

5✔
1281
                        // Using the CommitSet, we'll fail back all
5✔
1282
                        // upstream HTLCs for their corresponding outgoing
5✔
1283
                        // HTLC that exist on either of the remote commitments.
5✔
1284
                        // The map is used to deduplicate any shared HTLC's.
5✔
1285
                        for htlcSetKey, htlcs := range confCommitSet.HtlcSets {
10✔
1286
                                if !htlcSetKey.IsRemote {
8✔
1287
                                        continue
3✔
1288
                                }
1289

1290
                                for _, htlc := range htlcs {
10✔
1291
                                        // Only outgoing HTLCs have a
5✔
1292
                                        // corresponding incoming HTLC.
5✔
1293
                                        if htlc.Incoming {
9✔
1294
                                                continue
4✔
1295
                                        }
1296

1297
                                        cancelBreachedHTLCs.Add(htlc.HtlcIndex)
4✔
1298
                                }
1299
                        }
1300

1301
                        err := c.abandonForwards(cancelBreachedHTLCs)
5✔
1302
                        if err != nil {
5✔
1303
                                return StateError, closeTx, err
×
1304
                        }
×
1305
                } else {
13✔
1306
                        // If it's not a breach, we resolve all incoming dust
13✔
1307
                        // HTLCs immediately after the commitment is confirmed.
13✔
1308
                        err = c.failIncomingDust(
13✔
1309
                                htlcActions[HtlcIncomingDustFinalAction],
13✔
1310
                        )
13✔
1311
                        if err != nil {
13✔
1312
                                return StateError, closeTx, err
×
1313
                        }
×
1314

1315
                        // We fail the upstream HTLCs for all remote pending
1316
                        // outgoing HTLCs as soon as the commitment is
1317
                        // confirmed. The upstream HTLCs for outgoing dust
1318
                        // HTLCs have already been resolved before we reach
1319
                        // this point.
1320
                        getIdx := func(htlc channeldb.HTLC) uint64 {
24✔
1321
                                return htlc.HtlcIndex
11✔
1322
                        }
11✔
1323
                        remoteDangling := fn.NewSet(fn.Map(
13✔
1324
                                htlcActions[HtlcFailDanglingAction], getIdx,
13✔
1325
                        )...)
13✔
1326
                        err := c.abandonForwards(remoteDangling)
13✔
1327
                        if err != nil {
13✔
1328
                                return StateError, closeTx, err
×
1329
                        }
×
1330
                }
1331

1332
                // Now that we know we'll need to act, we'll process all the
1333
                // resolvers, then create the structures we need to resolve all
1334
                // outstanding contracts.
1335
                resolvers, err := c.prepContractResolutions(
15✔
1336
                        contractResolutions, triggerHeight, htlcActions,
15✔
1337
                )
15✔
1338
                if err != nil {
15✔
1339
                        log.Errorf("ChannelArbitrator(%v): unable to "+
×
1340
                                "resolve contracts: %v", c.cfg.ChanPoint, err)
×
1341
                        return StateError, closeTx, err
×
1342
                }
×
1343

1344
                log.Debugf("ChannelArbitrator(%v): inserting %v contract "+
15✔
1345
                        "resolvers", c.cfg.ChanPoint, len(resolvers))
15✔
1346

15✔
1347
                err = c.log.InsertUnresolvedContracts(nil, resolvers...)
15✔
1348
                if err != nil {
15✔
1349
                        return StateError, closeTx, err
×
1350
                }
×
1351

1352
                // Finally, we'll launch all the required contract resolvers.
1353
                // Once they're all resolved, we're no longer needed.
1354
                c.resolveContracts(resolvers)
15✔
1355

15✔
1356
                nextState = StateWaitingFullResolution
15✔
1357

1358
        // This is our terminal state. We'll keep returning this state until
1359
        // all contracts are fully resolved.
1360
        case StateWaitingFullResolution:
20✔
1361
                log.Infof("ChannelArbitrator(%v): still awaiting contract "+
20✔
1362
                        "resolution", c.cfg.ChanPoint)
20✔
1363

20✔
1364
                unresolved, err := c.log.FetchUnresolvedContracts()
20✔
1365
                if err != nil {
20✔
1366
                        return StateError, closeTx, err
×
1367
                }
×
1368

1369
                // If we have no unresolved contracts, then we can move to the
1370
                // final state.
1371
                if len(unresolved) == 0 {
35✔
1372
                        nextState = StateFullyResolved
15✔
1373
                        break
15✔
1374
                }
1375

1376
                // Otherwise, we still have unresolved contracts, so we'll
1377
                // stay alive to oversee their resolution.
1378
                nextState = StateWaitingFullResolution
8✔
1379

8✔
1380
                // Add debug logs.
8✔
1381
                for _, r := range unresolved {
16✔
1382
                        log.Debugf("ChannelArbitrator(%v): still have "+
8✔
1383
                                "unresolved contract: %T", c.cfg.ChanPoint, r)
8✔
1384
                }
8✔
1385

1386
        // If we start as fully resolved, then we'll end as fully resolved.
1387
        case StateFullyResolved:
25✔
1388
                // To ensure that the state of the contract in persistent
25✔
1389
                // storage is properly reflected, we'll mark the contract as
25✔
1390
                // fully resolved now.
25✔
1391
                nextState = StateFullyResolved
25✔
1392

25✔
1393
                log.Infof("ChannelPoint(%v) has been fully resolved "+
25✔
1394
                        "on-chain at height=%v", c.cfg.ChanPoint, triggerHeight)
25✔
1395

25✔
1396
                c.cfg.NotifyChannelResolved()
25✔
1397
        }
1398

1399
        log.Tracef("ChannelArbitrator(%v): next_state=%v", c.cfg.ChanPoint,
135✔
1400
                nextState)
135✔
1401

135✔
1402
        return nextState, closeTx, nil
135✔
1403
}
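
// NOTE: The helper below is an illustrative sketch added alongside this
// review; it is not part of the original channel_arbitrator.go. It isolates
// the error-tolerance rule applied when publishing the force close
// transaction in StateBroadcastCommit above: a double spend (some version of
// the commitment is already known) or a mempool fee rejection is treated as
// non-fatal, since the rebroadcaster keeps re-publishing the transaction
// until it confirms or is replaced. Any other publish error aborts the state
// transition. The function name is hypothetical.
func isFatalPublishErr(publishErr error) bool {
        if publishErr == nil {
                return false
        }

        // Benign failures: the transaction (or a conflicting version) is
        // already known, or its fee is currently below the mempool minimum.
        if errors.Is(publishErr, lnwallet.ErrDoubleSpend) ||
                errors.Is(publishErr, lnwallet.ErrMempoolFee) {

                return false
        }

        return true
}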
1404

1405
// sweepAnchors offers all given anchor resolutions to the sweeper. It requests
1406
// sweeping at the minimum fee rate. This fee rate can be upped manually by the
1407
// user via the BumpFee rpc.
1408
func (c *ChannelArbitrator) sweepAnchors(anchors *lnwallet.AnchorResolutions,
1409
        heightHint uint32) error {
22✔
1410

22✔
1411
        // Update the set of activeHTLCs so that the sweeping routine has an
22✔
1412
        // up-to-date view of the set of commitments.
22✔
1413
        c.updateActiveHTLCs()
22✔
1414

22✔
1415
        // Prepare the sweeping requests for all possible versions of
22✔
1416
        // commitments.
22✔
1417
        sweepReqs, err := c.prepareAnchorSweeps(heightHint, anchors)
22✔
1418
        if err != nil {
22✔
1419
                return err
×
1420
        }
×
1421

1422
        // Send out the sweeping requests to the sweeper.
1423
        for _, req := range sweepReqs {
33✔
1424
                _, err = c.cfg.Sweeper.SweepInput(req.input, req.params)
11✔
1425
                if err != nil {
11✔
1426
                        return err
×
1427
                }
×
1428
        }
1429

1430
        return nil
22✔
1431
}
1432

1433
// findCommitmentDeadlineAndValue finds the deadline (relative block height)
1434
// for a commitment transaction by extracting the minimum CLTV from its HTLCs.
1435
// From our PoV, the deadline delta is defined to be the smaller of,
1436
//   - half of the least CLTV from outgoing HTLCs' corresponding incoming
1437
//     HTLCs,  or,
1438
//   - half of the least CLTV from incoming HTLCs if the preimage is available.
1439
//
1440
// We use half of the CLTV value to ensure that we have enough time to sweep
1441
// the second-level HTLCs.
1442
//
1443
// It also finds the total value that is time-sensitive, which is the sum of
1444
// all the outgoing HTLCs plus incoming HTLCs whose preimages are known. It
1445
// then returns the value left after subtracting the budget used for sweeping
1446
// the time-sensitive HTLCs.
1447
//
1448
// NOTE: when the deadline turns out to be 0 blocks, we will replace it with 1
1449
// block because our fee estimator doesn't allow a 0 conf target. This also
1450
// means we've fallen behind and should increase our fee to make the transaction
1451
// confirmed asap.
1452
func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
1453
        htlcs htlcSet) (fn.Option[int32], btcutil.Amount, error) {
16✔
1454

16✔
1455
        deadlineMinHeight := uint32(math.MaxUint32)
16✔
1456
        totalValue := btcutil.Amount(0)
16✔
1457

16✔
1458
        // First, iterate through the outgoingHTLCs to find the lowest CLTV
16✔
1459
        // value.
16✔
1460
        for _, htlc := range htlcs.outgoingHTLCs {
29✔
1461
                // Skip if the HTLC is dust.
13✔
1462
                if htlc.OutputIndex < 0 {
19✔
1463
                        log.Debugf("ChannelArbitrator(%v): skipped deadline "+
6✔
1464
                                "for dust htlc=%x",
6✔
1465
                                c.cfg.ChanPoint, htlc.RHash[:])
6✔
1466

6✔
1467
                        continue
6✔
1468
                }
1469

1470
                value := htlc.Amt.ToSatoshis()
10✔
1471

10✔
1472
                // Find the expiry height for this outgoing HTLC's incoming
10✔
1473
                // HTLC.
10✔
1474
                deadlineOpt := c.cfg.FindOutgoingHTLCDeadline(htlc)
10✔
1475

10✔
1476
                // The deadline defaults to the current deadlineMinHeight,
10✔
1477
                // and it's overwritten when it's not none.
10✔
1478
                deadline := deadlineMinHeight
10✔
1479
                deadlineOpt.WhenSome(func(d int32) {
18✔
1480
                        deadline = uint32(d)
8✔
1481

8✔
1482
                        // We only consider the value to be under protection when
8✔
1483
                        // it's time-sensitive.
8✔
1484
                        totalValue += value
8✔
1485
                })
8✔
1486

1487
                if deadline < deadlineMinHeight {
18✔
1488
                        deadlineMinHeight = deadline
8✔
1489

8✔
1490
                        log.Tracef("ChannelArbitrator(%v): outgoing HTLC has "+
8✔
1491
                                "deadline=%v, value=%v", c.cfg.ChanPoint,
8✔
1492
                                deadlineMinHeight, value)
8✔
1493
                }
8✔
1494
        }
1495

1496
        // Then go through the incomingHTLCs and update the minHeight when the
1497
        // conditions are met.
1498
        for _, htlc := range htlcs.incomingHTLCs {
28✔
1499
                // Skip if the HTLC is dust.
12✔
1500
                if htlc.OutputIndex < 0 {
13✔
1501
                        log.Debugf("ChannelArbitrator(%v): skipped deadline "+
1✔
1502
                                "for dust htlc=%x",
1✔
1503
                                c.cfg.ChanPoint, htlc.RHash[:])
1✔
1504

1✔
1505
                        continue
1✔
1506
                }
1507

1508
                // Since it's an HTLC sent to us, check if we have preimage for
1509
                // this HTLC.
1510
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
11✔
1511
                if err != nil {
11✔
1512
                        return fn.None[int32](), 0, err
×
1513
                }
×
1514

1515
                if !preimageAvailable {
16✔
1516
                        continue
5✔
1517
                }
1518

1519
                value := htlc.Amt.ToSatoshis()
9✔
1520
                totalValue += value
9✔
1521

9✔
1522
                if htlc.RefundTimeout < deadlineMinHeight {
17✔
1523
                        deadlineMinHeight = htlc.RefundTimeout
8✔
1524

8✔
1525
                        log.Tracef("ChannelArbitrator(%v): incoming HTLC has "+
8✔
1526
                                "deadline=%v, amt=%v", c.cfg.ChanPoint,
8✔
1527
                                deadlineMinHeight, value)
8✔
1528
                }
8✔
1529
        }
1530

1531
        // Calculate the deadline. There are two cases to be handled here,
1532
        //   - when the deadlineMinHeight never gets updated, which could
1533
        //     happen when we have no outgoing HTLCs, and, for incoming HTLCs,
1534
        //       * either we have none, or,
1535
        //       * none of the HTLCs are preimageAvailable.
1536
        //   - when our deadlineMinHeight is no greater than the heightHint,
1537
        //     which means we are behind our schedule.
1538
        var deadline uint32
16✔
1539
        switch {
16✔
1540
        // When we couldn't find a deadline height from our HTLCs, we will fall
1541
        // back to the default value as there's no time pressure here.
1542
        case deadlineMinHeight == math.MaxUint32:
7✔
1543
                return fn.None[int32](), 0, nil
7✔
1544

1545
        // When the deadline is passed, we will fall back to the smallest conf
1546
        // target (1 block).
1547
        case deadlineMinHeight <= heightHint:
4✔
1548
                log.Warnf("ChannelArbitrator(%v): deadline is passed with "+
4✔
1549
                        "deadlineMinHeight=%d, heightHint=%d",
4✔
1550
                        c.cfg.ChanPoint, deadlineMinHeight, heightHint)
4✔
1551
                deadline = 1
4✔
1552

1553
        // Use half of the deadline delta, and leave the other half to be used
1554
        // to sweep the HTLCs.
1555
        default:
11✔
1556
                deadline = (deadlineMinHeight - heightHint) / 2
11✔
1557
        }
1558

1559
        // Calculate the value left after subtracting the budget used for
1560
        // sweeping the time-sensitive HTLCs.
1561
        valueLeft := totalValue - calculateBudget(
12✔
1562
                totalValue, c.cfg.Budget.DeadlineHTLCRatio,
12✔
1563
                c.cfg.Budget.DeadlineHTLC,
12✔
1564
        )
12✔
1565

12✔
1566
        log.Debugf("ChannelArbitrator(%v): calculated valueLeft=%v, "+
12✔
1567
                "deadline=%d, using deadlineMinHeight=%d, heightHint=%d",
12✔
1568
                c.cfg.ChanPoint, valueLeft, deadline, deadlineMinHeight,
12✔
1569
                heightHint)
12✔
1570

12✔
1571
        return fn.Some(int32(deadline)), valueLeft, nil
12✔
1572
}
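
// NOTE: The helper below is an illustrative sketch added alongside this
// review; it is not part of the original file. It condenses the deadline
// selection performed above into the three cases described in the comments.
// With hypothetical numbers heightHint=800000 and deadlineMinHeight=800144,
// the conf target becomes (800144-800000)/2 = 72 blocks, leaving the other
// half of the delta to sweep the second-level HTLC transactions. A zero
// return stands in for the fn.None case used by the real method.
func deadlineConfTarget(deadlineMinHeight, heightHint uint32) uint32 {
        switch {
        // No time-sensitive HTLC was found, so there is no deadline.
        case deadlineMinHeight == math.MaxUint32:
                return 0

        // The deadline has already passed, use the smallest conf target.
        case deadlineMinHeight <= heightHint:
                return 1

        // Otherwise, keep half of the remaining delta as the conf target.
        default:
                return (deadlineMinHeight - heightHint) / 2
        }
}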
1573

1574
// resolveContracts updates the activeResolvers list, launches the resolvers,
1575
// and then starts to resolve each contract concurrently.
1576
func (c *ChannelArbitrator) resolveContracts(resolvers []ContractResolver) {
16✔
1577
        c.activeResolversLock.Lock()
16✔
1578
        c.activeResolvers = resolvers
16✔
1579
        c.activeResolversLock.Unlock()
16✔
1580

16✔
1581
        // Launch all resolvers.
16✔
1582
        c.launchResolvers()
16✔
1583

16✔
1584
        for _, contract := range resolvers {
25✔
1585
                c.wg.Add(1)
9✔
1586
                go c.resolveContract(contract)
9✔
1587
        }
9✔
1588
}
1589

1590
// launchResolvers launches all the active resolvers concurrently.
1591
func (c *ChannelArbitrator) launchResolvers() {
22✔
1592
        c.activeResolversLock.Lock()
22✔
1593
        resolvers := c.activeResolvers
22✔
1594
        c.activeResolversLock.Unlock()
22✔
1595

22✔
1596
        // errChans is a map of channels that will be used to receive errors
22✔
1597
        // returned from launching the resolvers.
22✔
1598
        errChans := make(map[ContractResolver]chan error, len(resolvers))
22✔
1599

22✔
1600
        // Launch each resolver in goroutines.
22✔
1601
        for _, r := range resolvers {
31✔
1602
                // If the contract is already resolved, there's no need to
9✔
1603
                // launch it again.
9✔
1604
                if r.IsResolved() {
12✔
1605
                        log.Debugf("ChannelArbitrator(%v): skipping resolver "+
3✔
1606
                                "%T as it's already resolved", c.cfg.ChanPoint,
3✔
1607
                                r)
3✔
1608

3✔
1609
                        continue
3✔
1610
                }
1611

1612
                // Create a signal chan.
1613
                errChan := make(chan error, 1)
9✔
1614
                errChans[r] = errChan
9✔
1615

9✔
1616
                go func() {
18✔
1617
                        err := r.Launch()
9✔
1618
                        errChan <- err
9✔
1619
                }()
9✔
1620
        }
1621

1622
        // Wait for all resolvers to finish launching.
1623
        for r, errChan := range errChans {
31✔
1624
                select {
9✔
1625
                case err := <-errChan:
9✔
1626
                        if err == nil {
18✔
1627
                                continue
9✔
1628
                        }
1629

1630
                        log.Errorf("ChannelArbitrator(%v): unable to launch "+
×
1631
                                "contract resolver(%T): %v", c.cfg.ChanPoint, r,
×
1632
                                err)
×
1633

1634
                case <-c.quit:
×
1635
                        log.Debugf("ChannelArbitrator quit signal received, " +
×
1636
                                "exit launchResolvers")
×
1637

×
1638
                        return
×
1639
                }
1640
        }
1641
}
1642

1643
// advanceState is the main driver of our state machine. This method is an
1644
// iterative function which repeatedly attempts to advance the internal state
1645
// of the channel arbitrator. The state will be advanced until we reach a
1646
// redundant transition, meaning that the state transition is a noop. The
1647
// confCommitSet param, if non-nil, contains the commitment set that confirmed
1648
// on chain and is used to determine the set of chain actions to take.
1649
func (c *ChannelArbitrator) advanceState(
1650
        triggerHeight uint32, trigger transitionTrigger,
1651
        confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
91✔
1652

91✔
1653
        var (
91✔
1654
                priorState   ArbitratorState
91✔
1655
                forceCloseTx *wire.MsgTx
91✔
1656
        )
91✔
1657

91✔
1658
        // We'll continue to advance our state forward until the state we
91✔
1659
        // transition to is that same state that we started at.
91✔
1660
        for {
268✔
1661
                priorState = c.state
177✔
1662
                log.Debugf("ChannelArbitrator(%v): attempting state step with "+
177✔
1663
                        "trigger=%v from state=%v at height=%v",
177✔
1664
                        c.cfg.ChanPoint, trigger, priorState, triggerHeight)
177✔
1665

177✔
1666
                nextState, closeTx, err := c.stateStep(
177✔
1667
                        triggerHeight, trigger, confCommitSet,
177✔
1668
                )
177✔
1669
                if err != nil {
184✔
1670
                        log.Errorf("ChannelArbitrator(%v): unable to advance "+
7✔
1671
                                "state: %v", c.cfg.ChanPoint, err)
7✔
1672
                        return priorState, nil, err
7✔
1673
                }
7✔
1674

1675
                if forceCloseTx == nil && closeTx != nil {
192✔
1676
                        forceCloseTx = closeTx
19✔
1677
                }
19✔
1678

1679
                // Our termination transition is a noop transition. If we get
1680
                // our prior state back as the next state, then we'll
1681
                // terminate.
1682
                if nextState == priorState {
257✔
1683
                        log.Debugf("ChannelArbitrator(%v): terminating at "+
84✔
1684
                                "state=%v", c.cfg.ChanPoint, nextState)
84✔
1685
                        return nextState, forceCloseTx, nil
84✔
1686
                }
84✔
1687

1688
                // As the prior state was successfully executed, we can now
1689
                // commit the next state. This ensures that we will re-execute
1690
                // the prior state if anything fails.
1691
                if err := c.log.CommitState(nextState); err != nil {
95✔
1692
                        log.Errorf("ChannelArbitrator(%v): unable to commit "+
3✔
1693
                                "next state(%v): %v", c.cfg.ChanPoint,
3✔
1694
                                nextState, err)
3✔
1695
                        return priorState, nil, err
3✔
1696
                }
3✔
1697
                c.state = nextState
89✔
1698
        }
1699
}
1700

1701
// ChainAction is an enum that encompasses all possible on-chain actions
1702
// we'll take for a set of HTLC's.
1703
type ChainAction uint8
1704

1705
const (
1706
        // NoAction is the min chainAction type, indicating that no action
1707
        // needs to be taken for a given HTLC.
1708
        NoAction ChainAction = 0
1709

1710
        // HtlcTimeoutAction indicates that the HTLC will timeout soon. As a
1711
        // result, we should get ready to sweep it on chain after the timeout.
1712
        HtlcTimeoutAction = 1
1713

1714
        // HtlcClaimAction indicates that we should claim the HTLC on chain
1715
        // before its timeout period.
1716
        HtlcClaimAction = 2
1717

1718
        // HtlcFailDustAction indicates that we should fail the upstream HTLC
1719
        // for an outgoing dust HTLC immediately (even before the commitment
1720
        // transaction is confirmed) because it has no output on the commitment
1721
        // transaction. This also includes remote pending outgoing dust HTLCs.
1722
        HtlcFailDustAction = 3
1723

1724
        // HtlcOutgoingWatchAction indicates that we can't yet timeout this
1725
        // HTLC, but we had to go to chain in order to resolve an existing
1726
        // HTLC.  In this case, we'll either: time it out once it expires, or
1727
        // will learn the pre-image if the remote party claims the output. In
1728
        // this case, we'll add the pre-image to our global store.
1729
        HtlcOutgoingWatchAction = 4
1730

1731
        // HtlcIncomingWatchAction indicates that we don't yet have the
1732
        // pre-image to claim an incoming HTLC, but we had to go to chain in
1733
        // order to resolve an existing HTLC. In this case, we'll either: let the
1734
        // other party time it out, or eventually learn of the pre-image, in
1735
        // which case we'll claim on chain.
1736
        HtlcIncomingWatchAction = 5
1737

1738
        // HtlcIncomingDustFinalAction indicates that we should mark an incoming
1739
        // dust htlc as final because it can't be claimed on-chain.
1740
        HtlcIncomingDustFinalAction = 6
1741

1742
        // HtlcFailDanglingAction indicates that we should fail the upstream
1743
        // HTLC for an outgoing HTLC immediately after the commitment
1744
        // transaction has confirmed because it has no corresponding output on
1745
        // the commitment transaction. This category does NOT include any dust
1746
        // HTLCs which are mapped in the "HtlcFailDustAction" category.
1747
        HtlcFailDanglingAction = 7
1748
)
1749

1750
// String returns a human readable string describing a chain action.
1751
func (c ChainAction) String() string {
×
1752
        switch c {
×
1753
        case NoAction:
×
1754
                return "NoAction"
×
1755

1756
        case HtlcTimeoutAction:
×
1757
                return "HtlcTimeoutAction"
×
1758

1759
        case HtlcClaimAction:
×
1760
                return "HtlcClaimAction"
×
1761

1762
        case HtlcFailDustAction:
×
1763
                return "HtlcFailDustAction"
×
1764

1765
        case HtlcOutgoingWatchAction:
×
1766
                return "HtlcOutgoingWatchAction"
×
1767

1768
        case HtlcIncomingWatchAction:
×
1769
                return "HtlcIncomingWatchAction"
×
1770

1771
        case HtlcIncomingDustFinalAction:
×
1772
                return "HtlcIncomingDustFinalAction"
×
1773

1774
        case HtlcFailDanglingAction:
×
1775
                return "HtlcFailDanglingAction"
×
1776

1777
        default:
×
1778
                return "<unknown action>"
×
1779
        }
1780
}
1781

1782
// ChainActionMap is a map of a chain action, to the set of HTLC's that need to
1783
// be acted upon for a given action type.
1784
type ChainActionMap map[ChainAction][]channeldb.HTLC
1785

1786
// Merge merges the passed chain actions with the target chain action map.
1787
func (c ChainActionMap) Merge(actions ChainActionMap) {
71✔
1788
        for chainAction, htlcs := range actions {
87✔
1789
                c[chainAction] = append(c[chainAction], htlcs...)
16✔
1790
        }
16✔
1791
}
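
// NOTE: The example below is an illustrative sketch added alongside this
// review; it is not part of the original file. It shows that Merge simply
// appends the HTLCs of each action bucket, so merging the local and remote
// action maps keeps every HTLC from both sides, grouped per action. The
// variable names and HTLC indices are hypothetical.
func mergeExample() ChainActionMap {
        local := ChainActionMap{
                HtlcTimeoutAction: {{HtlcIndex: 1}},
        }
        remote := ChainActionMap{
                HtlcTimeoutAction:      {{HtlcIndex: 2}},
                HtlcFailDanglingAction: {{HtlcIndex: 3}},
        }

        // After merging, local[HtlcTimeoutAction] holds HTLCs 1 and 2, and
        // local[HtlcFailDanglingAction] holds HTLC 3.
        local.Merge(remote)

        return local
}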
1792

1793
// shouldGoOnChain takes into account the absolute timeout of the HTLC, if the
1794
// confirmation delta that we need is close, and returns a bool indicating if
1795
// we should go on chain to claim.  We do this rather than waiting up until the
1796
// last minute as we want to ensure that when we *need* (HTLC is timed out) to
1797
// sweep, the commitment is already confirmed.
1798
func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC,
1799
        broadcastDelta, currentHeight uint32) bool {
29✔
1800

29✔
1801
        // We'll calculate the broadcast cut off for this HTLC. This is the
29✔
1802
        // height that (based on our current fee estimation) we should
29✔
1803
        // broadcast in order to ensure the commitment transaction is confirmed
29✔
1804
        // before the HTLC fully expires.
29✔
1805
        broadcastCutOff := htlc.RefundTimeout - broadcastDelta
29✔
1806

29✔
1807
        log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+
29✔
1808
                "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout,
29✔
1809
                broadcastCutOff, currentHeight)
29✔
1810

29✔
1811
        // TODO(roasbeef): take into account default HTLC delta, don't need to
29✔
1812
        // broadcast immediately
29✔
1813
        //  * can then batch with SINGLE | ANYONECANPAY
29✔
1814

29✔
1815
        // We should go on-chain for this HTLC iff we're within our broadcast
29✔
1816
        // cutoff window.
29✔
1817
        if currentHeight < broadcastCutOff {
51✔
1818
                return false
22✔
1819
        }
22✔
1820

1821
        // In case of incoming htlc we should go to chain.
1822
        if htlc.Incoming {
13✔
1823
                return true
3✔
1824
        }
3✔
1825

1826
        // For htlcs that result from our own payments we give some grace
1827
        // period before force closing the channel. During this time we expect
1828
        // both nodes to connect and give a chance to the other node to send its
1829
        // updates and cancel the htlc.
1830
        // This shouldn't add any security risk as there is no incoming htlc to
1831
        // fulfill in this case and the expectation is that when the channel is
1832
        // active the other node will send update_fail_htlc to remove the htlc
1833
        // without closing the channel. It is up to the user to force close the
1834
        // channel if the peer misbehaves and doesn't send the update_fail_htlc.
1835
        // It is useful when this node is offline most of the time and is
1836
        // likely to miss the time slot where the htlc may be cancelled.
1837
        isForwarded := c.cfg.IsForwardedHTLC(c.cfg.ShortChanID, htlc.HtlcIndex)
10✔
1838
        upTime := c.cfg.Clock.Now().Sub(c.startTimestamp)
10✔
1839
        return isForwarded || upTime > c.cfg.PaymentsExpirationGracePeriod
10✔
1840
}
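
// NOTE: The helper below is an illustrative sketch added alongside this
// review; it is not part of the original file. It restates the cutoff rule
// used above with hypothetical numbers: an outgoing HTLC that expires at
// height 800100 combined with an OutgoingBroadcastDelta of 10 yields a
// cutoff of 800090, so from height 800090 onwards the HTLC is considered a
// candidate for going on chain.
func pastBroadcastCutOff(refundTimeout, broadcastDelta,
        currentHeight uint32) bool {

        // The cutoff is the height at which we must broadcast so that the
        // commitment confirms before the HTLC fully expires.
        cutOff := refundTimeout - broadcastDelta

        return currentHeight >= cutOff
}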
1841

1842
// checkCommitChainActions is called for each new block connected to the end of
1843
// the main chain. Given the new block height, this method will examine all
1844
// active HTLC's, and determine if we need to go on-chain to claim any of them.
1845
// A map of action -> []htlc is returned, detailing what action (if any) should
1846
// be performed for each HTLC. For timed out HTLC's, once the commitment has
1847
// been sufficiently confirmed, the HTLC's should be canceled backwards. For
1848
// redeemed HTLC's, we should send the pre-image back to the incoming link.
1849
func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
1850
        trigger transitionTrigger, htlcs htlcSet) (ChainActionMap, error) {
71✔
1851

71✔
1852
        // TODO(roasbeef): would need to lock channel? channel totem?
71✔
1853
        //  * race condition if adding and we broadcast, etc
71✔
1854
        //  * or would make each instance sync?
71✔
1855

71✔
1856
        log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+
71✔
1857
                "height=%v, in_htlc_count=%v, out_htlc_count=%v",
71✔
1858
                c.cfg.ChanPoint, height,
71✔
1859
                len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs))
71✔
1860

71✔
1861
        actionMap := make(ChainActionMap)
71✔
1862

71✔
1863
        // First, we'll make an initial pass over the set of incoming and
71✔
1864
        // outgoing HTLC's to decide if we need to go on chain at all.
71✔
1865
        haveChainActions := false
71✔
1866
        for _, htlc := range htlcs.outgoingHTLCs {
81✔
1867
                // We'll need to go on-chain for an outgoing HTLC if it was
10✔
1868
                // never resolved downstream, and it's "close" to timing out.
10✔
1869
                //
10✔
1870
                // TODO(yy): If there's no corresponding incoming HTLC, it
10✔
1871
                // means we are the first hop, hence the payer. This is a
10✔
1872
                // tricky case - unlike a forwarding hop, we don't have an
10✔
1873
                // incoming HTLC that will time out, which means as long as we
10✔
1874
                // can learn the preimage, we can settle the invoice (before it
10✔
1875
                // expires?).
10✔
1876
                toChain := c.shouldGoOnChain(
10✔
1877
                        htlc, c.cfg.OutgoingBroadcastDelta, height,
10✔
1878
                )
10✔
1879

10✔
1880
                if toChain {
13✔
1881
                        // Convert to int64 in case of overflow.
3✔
1882
                        remainingBlocks := int64(htlc.RefundTimeout) -
3✔
1883
                                int64(height)
3✔
1884

3✔
1885
                        log.Infof("ChannelArbitrator(%v): go to chain for "+
3✔
1886
                                "outgoing htlc %x: timeout=%v, amount=%v, "+
3✔
1887
                                "blocks_until_expiry=%v, broadcast_delta=%v",
3✔
1888
                                c.cfg.ChanPoint, htlc.RHash[:],
3✔
1889
                                htlc.RefundTimeout, htlc.Amt, remainingBlocks,
3✔
1890
                                c.cfg.OutgoingBroadcastDelta,
3✔
1891
                        )
3✔
1892
                }
3✔
1893

1894
                haveChainActions = haveChainActions || toChain
10✔
1895
        }
1896

1897
        for _, htlc := range htlcs.incomingHTLCs {
79✔
1898
                // We'll need to go on-chain to pull an incoming HTLC iff we
8✔
1899
                // know the pre-image and it's close to timing out. We need to
8✔
1900
                // ensure that we claim the funds that are rightfully ours
8✔
1901
                // on-chain.
8✔
1902
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
8✔
1903
                if err != nil {
8✔
1904
                        return nil, err
×
1905
                }
×
1906

1907
                if !preimageAvailable {
15✔
1908
                        continue
7✔
1909
                }
1910

1911
                toChain := c.shouldGoOnChain(
4✔
1912
                        htlc, c.cfg.IncomingBroadcastDelta, height,
4✔
1913
                )
4✔
1914

4✔
1915
                if toChain {
7✔
1916
                        // Convert to int64 in case of overflow.
3✔
1917
                        remainingBlocks := int64(htlc.RefundTimeout) -
3✔
1918
                                int64(height)
3✔
1919

3✔
1920
                        log.Infof("ChannelArbitrator(%v): go to chain for "+
3✔
1921
                                "incoming htlc %x: timeout=%v, amount=%v, "+
3✔
1922
                                "blocks_until_expiry=%v, broadcast_delta=%v",
3✔
1923
                                c.cfg.ChanPoint, htlc.RHash[:],
3✔
1924
                                htlc.RefundTimeout, htlc.Amt, remainingBlocks,
3✔
1925
                                c.cfg.IncomingBroadcastDelta,
3✔
1926
                        )
3✔
1927
                }
3✔
1928

1929
                haveChainActions = haveChainActions || toChain
4✔
1930
        }
1931

1932
        // If we don't have any actions to make, then we'll return an empty
1933
        // action map. We only do this if this was a chain trigger though, as
1934
        // if we're going to broadcast the commitment (or the remote party did)
1935
        // we're *forced* to act on each HTLC.
1936
        if !haveChainActions && trigger == chainTrigger {
114✔
1937
                log.Tracef("ChannelArbitrator(%v): no actions to take at "+
43✔
1938
                        "height=%v", c.cfg.ChanPoint, height)
43✔
1939
                return actionMap, nil
43✔
1940
        }
43✔
1941

1942
        // Now that we know we'll need to go on-chain, we'll examine all of our
1943
        // active outgoing HTLC's to see if we either need to: sweep them after
1944
        // a timeout (then cancel backwards), cancel them backwards
1945
        // immediately, or watch them as they're still active contracts.
1946
        for _, htlc := range htlcs.outgoingHTLCs {
41✔
1947
                switch {
10✔
1948
                // If the HTLC is dust, then we can cancel it backwards
1949
                // immediately as there's no matching contract to arbitrate
1950
                // on-chain. We know the HTLC is dust, if the OutputIndex
1951
                // negative.
1952
                case htlc.OutputIndex < 0:
5✔
1953
                        log.Tracef("ChannelArbitrator(%v): immediately "+
5✔
1954
                                "failing dust htlc=%x", c.cfg.ChanPoint,
5✔
1955
                                htlc.RHash[:])
5✔
1956

5✔
1957
                        actionMap[HtlcFailDustAction] = append(
5✔
1958
                                actionMap[HtlcFailDustAction], htlc,
5✔
1959
                        )
5✔
1960

1961
                // If we don't need to immediately act on this HTLC, then we'll
1962
                // mark it still "live". After we broadcast, we'll monitor it
1963
                // until the HTLC times out to see if we can also redeem it
1964
                // on-chain.
1965
                case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
1966
                        height,
1967
                ):
8✔
1968
                        // TODO(roasbeef): also need to be able to query
8✔
1969
                        // circuit map to see if HTLC hasn't been fully
8✔
1970
                        // resolved
8✔
1971
                        //
8✔
1972
                        //  * can't fail incoming until if outgoing not yet
8✔
1973
                        //  failed
8✔
1974

8✔
1975
                        log.Tracef("ChannelArbitrator(%v): watching chain to "+
8✔
1976
                                "decide action for outgoing htlc=%x",
8✔
1977
                                c.cfg.ChanPoint, htlc.RHash[:])
8✔
1978

8✔
1979
                        actionMap[HtlcOutgoingWatchAction] = append(
8✔
1980
                                actionMap[HtlcOutgoingWatchAction], htlc,
8✔
1981
                        )
8✔
1982

1983
                // Otherwise, we'll update our actionMap to mark that we need
1984
                // to sweep this HTLC on-chain
1985
                default:
3✔
1986
                        log.Tracef("ChannelArbitrator(%v): going on-chain to "+
3✔
1987
                                "timeout htlc=%x", c.cfg.ChanPoint, htlc.RHash[:])
3✔
1988

3✔
1989
                        actionMap[HtlcTimeoutAction] = append(
3✔
1990
                                actionMap[HtlcTimeoutAction], htlc,
3✔
1991
                        )
3✔
1992
                }
1993
        }
1994

1995
        // Similarly, for each incoming HTLC, now that we need to go on-chain,
1996
        // we'll either: sweep it immediately if we know the pre-image, or
1997
        // observe the output on-chain if we don't. In this last case, we'll
1998
        // either learn of it eventually from the outgoing HTLC, or the sender
1999
        // will timeout the HTLC.
2000
        for _, htlc := range htlcs.incomingHTLCs {
39✔
2001
                // If the HTLC is dust, there is no action to be taken.
8✔
2002
                if htlc.OutputIndex < 0 {
13✔
2003
                        log.Debugf("ChannelArbitrator(%v): no resolution "+
5✔
2004
                                "needed for incoming dust htlc=%x",
5✔
2005
                                c.cfg.ChanPoint, htlc.RHash[:])
5✔
2006

5✔
2007
                        actionMap[HtlcIncomingDustFinalAction] = append(
5✔
2008
                                actionMap[HtlcIncomingDustFinalAction], htlc,
5✔
2009
                        )
5✔
2010

5✔
2011
                        continue
5✔
2012
                }
2013

2014
                log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
6✔
2015
                        "action for incoming htlc=%x", c.cfg.ChanPoint,
6✔
2016
                        htlc.RHash[:])
6✔
2017

6✔
2018
                actionMap[HtlcIncomingWatchAction] = append(
6✔
2019
                        actionMap[HtlcIncomingWatchAction], htlc,
6✔
2020
                )
6✔
2021
        }
2022

2023
        return actionMap, nil
31✔
2024
}
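
// NOTE: The helper below is an illustrative sketch added alongside this
// review; it is not part of the original file. It summarizes how an outgoing
// HTLC is classified above once we know we are going on chain: dust HTLCs
// are failed back immediately, HTLCs that are not yet close to expiry are
// only watched, and everything else is timed out on chain. The function name
// is hypothetical.
func classifyOutgoingHTLC(c *ChannelArbitrator, htlc channeldb.HTLC,
        height uint32) ChainAction {

        switch {
        // Dust HTLCs have no output to arbitrate on chain.
        case htlc.OutputIndex < 0:
                return HtlcFailDustAction

        // Not yet close to its timeout, so we only watch it after broadcast.
        case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta, height):
                return HtlcOutgoingWatchAction

        // Otherwise, we will sweep it via the timeout path.
        default:
                return HtlcTimeoutAction
        }
}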
2025

2026
// isPreimageAvailable returns whether the hash preimage is available in either
2027
// the preimage cache or the invoice database.
2028
func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool,
2029
        error) {
29✔
2030

29✔
2031
        // Start by checking the preimage cache for preimages of
29✔
2032
        // forwarded HTLCs.
29✔
2033
        _, preimageAvailable := c.cfg.PreimageDB.LookupPreimage(
29✔
2034
                hash,
29✔
2035
        )
29✔
2036
        if preimageAvailable {
39✔
2037
                return true, nil
10✔
2038
        }
10✔
2039

2040
        // Then check if we have an invoice that can be settled by this HTLC.
2041
        //
2042
        // TODO(joostjager): Check that there are still more blocks remaining
2043
        // than the invoice cltv delta. We don't want to go to chain only to
2044
        // have the incoming contest resolver decide that we don't want to
2045
        // settle this invoice.
2046
        invoice, err := c.cfg.Registry.LookupInvoice(context.Background(), hash)
22✔
2047
        switch {
22✔
2048
        case err == nil:
3✔
2049
        case errors.Is(err, invoices.ErrInvoiceNotFound) ||
2050
                errors.Is(err, invoices.ErrNoInvoicesCreated):
22✔
2051

22✔
2052
                return false, nil
22✔
2053
        default:
×
2054
                return false, err
×
2055
        }
2056

2057
        preimageAvailable = invoice.Terms.PaymentPreimage != nil
3✔
2058

3✔
2059
        return preimageAvailable, nil
3✔
2060
}
2061

2062
// checkLocalChainActions is similar to checkCommitChainActions, but it also
2063
// examines the set of HTLCs on the remote party's commitment. This allows us
2064
// to ensure we're able to satisfy the HTLC timeout constraints for incoming vs
2065
// outgoing HTLCs.
2066
func (c *ChannelArbitrator) checkLocalChainActions(
2067
        height uint32, trigger transitionTrigger,
2068
        activeHTLCs map[HtlcSetKey]htlcSet,
2069
        commitsConfirmed bool) (ChainActionMap, error) {
63✔
2070

63✔
2071
        // First, we'll check our local chain actions as normal. This will only
63✔
2072
        // examine HTLCs on our local commitment (timeout or settle).
63✔
2073
        localCommitActions, err := c.checkCommitChainActions(
63✔
2074
                height, trigger, activeHTLCs[LocalHtlcSet],
63✔
2075
        )
63✔
2076
        if err != nil {
63✔
2077
                return nil, err
×
2078
        }
×
2079

2080
        // Next, we'll examine the remote commitment (and maybe a dangling one)
2081
        // to see if the set difference of our HTLCs is non-empty. If so, then
2082
        // we may need to cancel back some HTLCs if we decide to go to chain.
2083
        remoteDanglingActions := c.checkRemoteDanglingActions(
63✔
2084
                height, activeHTLCs, commitsConfirmed,
63✔
2085
        )
63✔
2086

63✔
2087
        // Finally, we'll merge the two set of chain actions.
63✔
2088
        localCommitActions.Merge(remoteDanglingActions)
63✔
2089

63✔
2090
        return localCommitActions, nil
63✔
2091
}
2092

2093
// checkRemoteDanglingActions examines the set of remote commitments for any
2094
// HTLCs that are close to timing out. If we find any, then we'll return a set
2095
// of chain actions for HTLCs that are on their commitment, but not ours, to
2096
// cancel immediately.
2097
func (c *ChannelArbitrator) checkRemoteDanglingActions(
2098
        height uint32, activeHTLCs map[HtlcSetKey]htlcSet,
2099
        commitsConfirmed bool) ChainActionMap {
63✔
2100

63✔
2101
        var (
63✔
2102
                pendingRemoteHTLCs []channeldb.HTLC
63✔
2103
                localHTLCs         = make(map[uint64]struct{})
63✔
2104
                remoteHTLCs        = make(map[uint64]channeldb.HTLC)
63✔
2105
                actionMap          = make(ChainActionMap)
63✔
2106
        )
63✔
2107

63✔
2108
        // First, we'll construct two sets of the outgoing HTLCs: those on our
63✔
2109
        // local commitment, and those that are on the remote commitment(s).
63✔
2110
        for htlcSetKey, htlcs := range activeHTLCs {
183✔
2111
                if htlcSetKey.IsRemote {
185✔
2112
                        for _, htlc := range htlcs.outgoingHTLCs {
82✔
2113
                                remoteHTLCs[htlc.HtlcIndex] = htlc
17✔
2114
                        }
17✔
2115
                } else {
58✔
2116
                        for _, htlc := range htlcs.outgoingHTLCs {
66✔
2117
                                localHTLCs[htlc.HtlcIndex] = struct{}{}
8✔
2118
                        }
8✔
2119
                }
2120
        }
2121

2122
        // With both sets constructed, we'll now compute the set difference of
2123
        // our two sets of HTLCs. This'll give us the HTLCs that exist on the
2124
        // remote commitment transaction, but not on ours.
2125
        for htlcIndex, htlc := range remoteHTLCs {
80✔
2126
                if _, ok := localHTLCs[htlcIndex]; ok {
21✔
2127
                        continue
4✔
2128
                }
2129

2130
                pendingRemoteHTLCs = append(pendingRemoteHTLCs, htlc)
16✔
2131
        }
2132

2133
        // Finally, we'll examine all the pending remote HTLCs for those that
2134
        // have expired. If we find any, then we'll recommend that they be
2135
        // failed now so we can free up the incoming HTLC.
2136
        for _, htlc := range pendingRemoteHTLCs {
79✔
2137
                // We'll now check if we need to go to chain in order to cancel
16✔
2138
                // the incoming HTLC.
16✔
2139
                goToChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
16✔
2140
                        height,
16✔
2141
                )
16✔
2142

16✔
2143
                // If we don't need to go to chain, and no commitments have
16✔
2144
                // been confirmed, then we can move on. Otherwise, if
16✔
2145
                // commitments have been confirmed, then we need to cancel back
16✔
2146
                // *all* of the pending remote HTLCs.
16✔
2147
                if !goToChain && !commitsConfirmed {
23✔
2148
                        continue
7✔
2149
                }
2150

2151
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
12✔
2152
                if err != nil {
12✔
2153
                        log.Errorf("ChannelArbitrator(%v): failed to query "+
×
2154
                                "preimage for dangling htlc=%x from remote "+
×
2155
                                "commitments diff", c.cfg.ChanPoint,
×
2156
                                htlc.RHash[:])
×
2157

×
2158
                        continue
×
2159
                }
2160

2161
                if preimageAvailable {
12✔
2162
                        continue
×
2163
                }
2164

2165
                // Dust htlcs can be canceled back even before the commitment
2166
                // transaction confirms. Dust htlcs are not enforceable onchain.
2167
                // If another version of the commit tx were to confirm, we
2168
                // would either gain or lose those dust amounts, but there is
2169
                // no other option than cancelling the incoming HTLC back,
2170
                // because we will never learn the preimage.
2171
                if htlc.OutputIndex < 0 {
12✔
2172
                        log.Infof("ChannelArbitrator(%v): fail dangling dust "+
×
2173
                                "htlc=%x from local/remote commitments diff",
×
2174
                                c.cfg.ChanPoint, htlc.RHash[:])
×
2175

×
2176
                        actionMap[HtlcFailDustAction] = append(
×
2177
                                actionMap[HtlcFailDustAction], htlc,
×
2178
                        )
×
2179

×
2180
                        continue
×
2181
                }
2182

2183
                log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
12✔
2184
                        "local/remote commitments diff",
12✔
2185
                        c.cfg.ChanPoint, htlc.RHash[:])
12✔
2186

12✔
2187
                actionMap[HtlcFailDanglingAction] = append(
12✔
2188
                        actionMap[HtlcFailDanglingAction], htlc,
12✔
2189
                )
12✔
2190
        }
2191

2192
        return actionMap
63✔
2193
}
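
// NOTE: The helper below is an illustrative sketch added alongside this
// review; it is not part of the original file. It isolates the set
// difference computed above: the outgoing HTLCs that exist on the remote
// commitment(s) but not on our local commitment, i.e. the dangling
// candidates that may need to be cancelled back. The function name is
// hypothetical.
func danglingOutgoingHTLCs(localHTLCs map[uint64]struct{},
        remoteHTLCs map[uint64]channeldb.HTLC) []channeldb.HTLC {

        var pending []channeldb.HTLC
        for htlcIndex, htlc := range remoteHTLCs {
                // Skip HTLCs that also exist on our local commitment.
                if _, ok := localHTLCs[htlcIndex]; ok {
                        continue
                }

                pending = append(pending, htlc)
        }

        return pending
}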
2194

2195
// checkRemoteChainActions examines the two possible remote commitment chains
2196
// and returns the set of chain actions we need to carry out if the remote
2197
// commitment (non pending) confirms. The pendingConf indicates if the pending
2198
// remote commitment confirmed. This is similar to checkCommitChainActions, but
2199
// we'll immediately fail any HTLCs on the pending remote commit, but not the
2200
// remote commit (or the other way around).
2201
func (c *ChannelArbitrator) checkRemoteChainActions(
2202
        height uint32, trigger transitionTrigger,
2203
        activeHTLCs map[HtlcSetKey]htlcSet,
2204
        pendingConf bool) (ChainActionMap, error) {
11✔
2205

11✔
2206
        // First, we'll examine all the normal chain actions on the remote
11✔
2207
        // commitment that confirmed.
11✔
2208
        confHTLCs := activeHTLCs[RemoteHtlcSet]
11✔
2209
        if pendingConf {
13✔
2210
                confHTLCs = activeHTLCs[RemotePendingHtlcSet]
2✔
2211
        }
2✔
2212
        remoteCommitActions, err := c.checkCommitChainActions(
11✔
2213
                height, trigger, confHTLCs,
11✔
2214
        )
11✔
2215
        if err != nil {
11✔
2216
                return nil, err
×
2217
        }
×
2218

2219
        // With these actions computed, we'll now check the diff of the HTLCs on
2220
        // the commitments, and cancel back any that are on the pending but not
2221
        // the non-pending.
2222
        remoteDiffActions := c.checkRemoteDiffActions(
11✔
2223
                activeHTLCs, pendingConf,
11✔
2224
        )
11✔
2225

11✔
2226
        // Finally, we'll merge all the chain actions and the final set of
11✔
2227
        // chain actions.
11✔
2228
        remoteCommitActions.Merge(remoteDiffActions)
11✔
2229
        return remoteCommitActions, nil
11✔
2230
}
2231

2232
// checkRemoteDiffActions checks the set difference of the HTLCs on the remote
2233
// confirmed commit and remote pending commit for HTLCs that we need to cancel
2234
// back. If we find any HTLCs on the remote pending but not the remote, then
2235
// we'll mark them to be failed immediately.
2236
func (c *ChannelArbitrator) checkRemoteDiffActions(
2237
        activeHTLCs map[HtlcSetKey]htlcSet,
2238
        pendingConf bool) ChainActionMap {
11✔
2239

11✔
2240
        // First, we'll partition the HTLCs into those that are present on the
11✔
2241
        // confirmed commitment, and those on the dangling commitment.
11✔
2242
        confHTLCs := activeHTLCs[RemoteHtlcSet]
11✔
2243
        danglingHTLCs := activeHTLCs[RemotePendingHtlcSet]
11✔
2244
        if pendingConf {
13✔
2245
                confHTLCs = activeHTLCs[RemotePendingHtlcSet]
2✔
2246
                danglingHTLCs = activeHTLCs[RemoteHtlcSet]
2✔
2247
        }
2✔
2248

2249
        // Next, we'll create a set of all the HTLCs on the confirmed commitment.
2250
        remoteHtlcs := make(map[uint64]struct{})
11✔
2251
        for _, htlc := range confHTLCs.outgoingHTLCs {
16✔
2252
                remoteHtlcs[htlc.HtlcIndex] = struct{}{}
5✔
2253
        }
5✔
2254

2255
        // With the remote HTLCs assembled, we'll mark any HTLCs only on the
2256
        // remote pending commitment to be failed asap.
2257
        actionMap := make(ChainActionMap)
11✔
2258
        for _, htlc := range danglingHTLCs.outgoingHTLCs {
16✔
2259
                if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok {
6✔
2260
                        continue
1✔
2261
                }
2262

2263
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
4✔
2264
                if err != nil {
4✔
2265
                        log.Errorf("ChannelArbitrator(%v): failed to query "+
×
2266
                                "preimage for dangling htlc=%x from remote "+
×
2267
                                "commitments diff", c.cfg.ChanPoint,
×
2268
                                htlc.RHash[:])
×
2269

×
2270
                        continue
×
2271
                }
2272

2273
                if preimageAvailable {
4✔
2274
                        continue
×
2275
                }
2276

2277
                // Dust HTLCs on the remote commitment can be failed back.
2278
                if htlc.OutputIndex < 0 {
4✔
2279
                        log.Infof("ChannelArbitrator(%v): fail dangling dust "+
×
2280
                                "htlc=%x from remote commitments diff",
×
2281
                                c.cfg.ChanPoint, htlc.RHash[:])
×
2282

×
2283
                        actionMap[HtlcFailDustAction] = append(
×
2284
                                actionMap[HtlcFailDustAction], htlc,
×
2285
                        )
×
2286

×
2287
                        continue
×
2288
                }
2289

2290
                actionMap[HtlcFailDanglingAction] = append(
4✔
2291
                        actionMap[HtlcFailDanglingAction], htlc,
4✔
2292
                )
4✔
2293

4✔
2294
                log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
4✔
2295
                        "remote commitments diff",
4✔
2296
                        c.cfg.ChanPoint, htlc.RHash[:])
4✔
2297
        }
2298

2299
        return actionMap
11✔
2300
}
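
// The function below is an illustrative sketch, not part of lnd: it distills
// the set-difference idea used by checkRemoteDiffActions above. The confirmed
// HTLC indices are collected into a set, and any dangling HTLC index missing
// from that set is returned as needing to be failed back. Both parameters are
// hypothetical stand-ins for the real htlcSet/channeldb.HTLC types.
func exampleDanglingIndices(confirmedIdx, danglingIdx []uint64) []uint64 {
        // Index the HTLCs present on the confirmed commitment.
        onConfirmed := make(map[uint64]struct{}, len(confirmedIdx))
        for _, idx := range confirmedIdx {
                onConfirmed[idx] = struct{}{}
        }

        // Anything that only lives on the dangling commitment is a candidate
        // for being failed back immediately.
        var toFail []uint64
        for _, idx := range danglingIdx {
                if _, ok := onConfirmed[idx]; ok {
                        continue
                }
                toFail = append(toFail, idx)
        }

        return toFail
}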
2301

2302
// constructChainActions returns the set of actions that should be taken for
2303
// confirmed HTLCs at the specified height. Our actions will depend on the set
2304
// of HTLCs that were active across all channels at the time of channel
2305
// closure.
2306
func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
2307
        height uint32, trigger transitionTrigger) (ChainActionMap, error) {
24✔
2308

24✔
2309
        // If we've reached this point and have no confirmed commitment set,
24✔
2310
        // then this is an older node that had a pending close channel before
24✔
2311
        // the CommitSet was introduced. In this case, we'll just return the
24✔
2312
        // existing ChainActionMap they had on disk.
24✔
2313
        if confCommitSet == nil || confCommitSet.ConfCommitKey.IsNone() {
31✔
2314
                return c.log.FetchChainActions()
7✔
2315
        }
7✔
2316

2317
        // Otherwise, we have the full commitment set written to disk, and can
2318
        // proceed as normal.
2319
        htlcSets := confCommitSet.toActiveHTLCSets()
17✔
2320
        confCommitKey, err := confCommitSet.ConfCommitKey.UnwrapOrErr(
17✔
2321
                fmt.Errorf("no commitKey available"),
17✔
2322
        )
17✔
2323
        if err != nil {
17✔
2324
                return nil, err
×
2325
        }
×
2326

2327
        switch confCommitKey {
17✔
2328
        // If the local commitment transaction confirmed, then we'll examine
2329
        // that as well as their commitments to the set of chain actions.
2330
        case LocalHtlcSet:
9✔
2331
                return c.checkLocalChainActions(
9✔
2332
                        height, trigger, htlcSets, true,
9✔
2333
                )
9✔
2334

2335
        // If the remote commitment confirmed, then we'll grab all the chain
2336
        // actions for the remote commit, and check the pending commit for any
2337
        // HTLCs we need to handle immediately (dust).
2338
        case RemoteHtlcSet:
9✔
2339
                return c.checkRemoteChainActions(
9✔
2340
                        height, trigger, htlcSets, false,
9✔
2341
                )
9✔
2342

2343
        // Otherwise, the remote pending commitment confirmed, so we'll examine
2344
        // the HTLCs on that unrevoked dangling commitment.
2345
        case RemotePendingHtlcSet:
2✔
2346
                return c.checkRemoteChainActions(
2✔
2347
                        height, trigger, htlcSets, true,
2✔
2348
                )
2✔
2349
        }
2350

2351
        return nil, fmt.Errorf("unable to locate chain actions")
×
2352
}
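
// exampleOption below is an illustrative, minimal stand-in written for this
// review; it is NOT lnd's fn.Option type. It only demonstrates the
// UnwrapOrErr pattern used on ConfCommitKey above: an absent value is
// converted into a caller-supplied error instead of being dereferenced.
type exampleOption[T any] struct {
        value T
        isSet bool
}

// UnwrapOrErr returns the contained value, or the supplied error when the
// option is empty.
func (o exampleOption[T]) UnwrapOrErr(err error) (T, error) {
        if !o.isSet {
                var zero T
                return zero, err
        }

        return o.value, nil
}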
2353

2354
// prepContractResolutions is called either in the case that we decide we need
2355
// to go to chain, or the remote party goes to chain. Given a set of actions we
2356
// need to take for each HTLC, this method will return a set of contract
2357
// resolvers that will resolve the contracts on-chain if needed, and also a set
2358
// of packets to send to the htlcswitch in order to ensure all incoming HTLCs
2359
// are properly resolved.
2360
func (c *ChannelArbitrator) prepContractResolutions(
2361
        contractResolutions *ContractResolutions, height uint32,
2362
        htlcActions ChainActionMap) ([]ContractResolver, error) {
15✔
2363

15✔
2364
        // We'll also fetch the historical state of this channel, as it should
15✔
2365
        // have been marked as closed by now, and supplement each resolver with it
15✔
2366
        // such that we can properly resolve our pending contracts.
15✔
2367
        var chanState *channeldb.OpenChannel
15✔
2368
        chanState, err := c.cfg.FetchHistoricalChannel()
15✔
2369
        switch {
15✔
2370
        // If we don't find this channel, then it may be the case that it
2371
        // was closed before we started to retain the final state
2372
        // information for open channels.
2373
        case err == channeldb.ErrNoHistoricalBucket:
×
2374
                fallthrough
×
2375
        case err == channeldb.ErrChannelNotFound:
×
2376
                log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
×
2377
                        "state", c.cfg.ChanPoint)
×
2378

2379
        case err != nil:
×
2380
                return nil, err
×
2381
        }
2382

2383
        incomingResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
15✔
2384
        outgoingResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
15✔
2385

15✔
2386
        // We'll use these two maps to quickly look up an active HTLC with its
15✔
2387
        // matching HTLC resolution.
15✔
2388
        outResolutionMap := make(map[wire.OutPoint]lnwallet.OutgoingHtlcResolution)
15✔
2389
        inResolutionMap := make(map[wire.OutPoint]lnwallet.IncomingHtlcResolution)
15✔
2390
        for i := 0; i < len(incomingResolutions); i++ {
18✔
2391
                inRes := incomingResolutions[i]
3✔
2392
                inResolutionMap[inRes.HtlcPoint()] = inRes
3✔
2393
        }
3✔
2394
        for i := 0; i < len(outgoingResolutions); i++ {
19✔
2395
                outRes := outgoingResolutions[i]
4✔
2396
                outResolutionMap[outRes.HtlcPoint()] = outRes
4✔
2397
        }
4✔
2398

2399
        // We'll create the resolver kit that we'll be cloning for each
2400
        // resolver so they each can do their duty.
2401
        resolverCfg := ResolverConfig{
15✔
2402
                ChannelArbitratorConfig: c.cfg,
15✔
2403
                Checkpoint: func(res ContractResolver,
15✔
2404
                        reports ...*channeldb.ResolverReport) error {
20✔
2405

5✔
2406
                        return c.log.InsertUnresolvedContracts(reports, res)
5✔
2407
                },
5✔
2408
        }
2409

2410
        commitHash := contractResolutions.CommitHash
15✔
2411

15✔
2412
        var htlcResolvers []ContractResolver
15✔
2413

15✔
2414
        // We instantiate an anchor resolver if the commitment tx has an
15✔
2415
        // anchor.
15✔
2416
        if contractResolutions.AnchorResolution != nil {
20✔
2417
                anchorResolver := newAnchorResolver(
5✔
2418
                        contractResolutions.AnchorResolution.AnchorSignDescriptor,
5✔
2419
                        contractResolutions.AnchorResolution.CommitAnchor,
5✔
2420
                        height, c.cfg.ChanPoint, resolverCfg,
5✔
2421
                )
5✔
2422
                anchorResolver.SupplementState(chanState)
5✔
2423

5✔
2424
                htlcResolvers = append(htlcResolvers, anchorResolver)
5✔
2425
        }
5✔
2426

2427
        // If this is a breach close, we'll create a breach resolver, determine
2428
        // the htlc's to fail back, and exit. This is done because the other
2429
        // steps taken for non-breach-closes do not matter for breach-closes.
2430
        if contractResolutions.BreachResolution != nil {
20✔
2431
                breachResolver := newBreachResolver(resolverCfg)
5✔
2432
                htlcResolvers = append(htlcResolvers, breachResolver)
5✔
2433

5✔
2434
                return htlcResolvers, nil
5✔
2435
        }
5✔
2436

2437
        // For each HTLC, we'll either act immediately, meaning we'll instantly
2438
        // fail the HTLC, or we'll act only once the transaction has been
2439
        // confirmed, in which case we'll need an HTLC resolver.
2440
        for htlcAction, htlcs := range htlcActions {
27✔
2441
                switch htlcAction {
14✔
2442
                // If we can claim this HTLC, we'll create an HTLC resolver to
2443
                // claim the HTLC (second-level or directly), then add the pre
2444
                case HtlcClaimAction:
×
2445
                        for _, htlc := range htlcs {
×
2446
                                htlc := htlc
×
2447

×
2448
                                htlcOp := wire.OutPoint{
×
2449
                                        Hash:  commitHash,
×
2450
                                        Index: uint32(htlc.OutputIndex),
×
2451
                                }
×
2452

×
2453
                                resolution, ok := inResolutionMap[htlcOp]
×
2454
                                if !ok {
×
2455
                                        // TODO(roasbeef): panic?
×
2456
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2457
                                                "incoming resolution: %v",
×
2458
                                                c.cfg.ChanPoint, htlcOp)
×
2459
                                        continue
×
2460
                                }
2461

2462
                                resolver := newSuccessResolver(
×
2463
                                        resolution, height, htlc, resolverCfg,
×
2464
                                )
×
2465
                                if chanState != nil {
×
2466
                                        resolver.SupplementState(chanState)
×
2467
                                }
×
2468
                                htlcResolvers = append(htlcResolvers, resolver)
×
2469
                        }
2470

2471
                // If we can timeout the HTLC directly, then we'll create the
2472
                // proper resolver to do so, who will then cancel the packet
2473
                // backwards.
2474
                case HtlcTimeoutAction:
3✔
2475
                        for _, htlc := range htlcs {
6✔
2476
                                htlc := htlc
3✔
2477

3✔
2478
                                htlcOp := wire.OutPoint{
3✔
2479
                                        Hash:  commitHash,
3✔
2480
                                        Index: uint32(htlc.OutputIndex),
3✔
2481
                                }
3✔
2482

3✔
2483
                                resolution, ok := outResolutionMap[htlcOp]
3✔
2484
                                if !ok {
3✔
2485
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2486
                                                "outgoing resolution: %v", c.cfg.ChanPoint, htlcOp)
×
2487
                                        continue
×
2488
                                }
2489

2490
                                resolver := newTimeoutResolver(
3✔
2491
                                        resolution, height, htlc, resolverCfg,
3✔
2492
                                )
3✔
2493
                                if chanState != nil {
6✔
2494
                                        resolver.SupplementState(chanState)
3✔
2495
                                }
3✔
2496

2497
                                // For outgoing HTLCs, we will also need to
2498
                                // supplement the resolver with the expiry
2499
                                // block height of its corresponding incoming
2500
                                // HTLC.
2501
                                deadline := c.cfg.FindOutgoingHTLCDeadline(htlc)
3✔
2502
                                resolver.SupplementDeadline(deadline)
3✔
2503

3✔
2504
                                htlcResolvers = append(htlcResolvers, resolver)
3✔
2505
                        }
2506

2507
                // If this is an incoming HTLC, but we can't act yet, then
2508
                // we'll create an incoming resolver to redeem the HTLC if we
2509
                // learn of the pre-image, or let the remote party time out.
2510
                case HtlcIncomingWatchAction:
3✔
2511
                        for _, htlc := range htlcs {
6✔
2512
                                htlc := htlc
3✔
2513

3✔
2514
                                htlcOp := wire.OutPoint{
3✔
2515
                                        Hash:  commitHash,
3✔
2516
                                        Index: uint32(htlc.OutputIndex),
3✔
2517
                                }
3✔
2518

3✔
2519
                                // TODO(roasbeef): need to handle incoming dust...
3✔
2520

3✔
2521
                                // TODO(roasbeef): can't be negative!!!
3✔
2522
                                resolution, ok := inResolutionMap[htlcOp]
3✔
2523
                                if !ok {
3✔
2524
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2525
                                                "incoming resolution: %v",
×
2526
                                                c.cfg.ChanPoint, htlcOp)
×
2527
                                        continue
×
2528
                                }
2529

2530
                                resolver := newIncomingContestResolver(
3✔
2531
                                        resolution, height, htlc,
3✔
2532
                                        resolverCfg,
3✔
2533
                                )
3✔
2534
                                if chanState != nil {
6✔
2535
                                        resolver.SupplementState(chanState)
3✔
2536
                                }
3✔
2537
                                htlcResolvers = append(htlcResolvers, resolver)
3✔
2538
                        }
2539

2540
                // Finally, if this is an outgoing HTLC we've sent, then we'll
2541
                // launch a resolver to watch for the pre-image (and settle
2542
                // backwards), or just timeout.
2543
                case HtlcOutgoingWatchAction:
4✔
2544
                        for _, htlc := range htlcs {
8✔
2545
                                htlc := htlc
4✔
2546

4✔
2547
                                htlcOp := wire.OutPoint{
4✔
2548
                                        Hash:  commitHash,
4✔
2549
                                        Index: uint32(htlc.OutputIndex),
4✔
2550
                                }
4✔
2551

4✔
2552
                                resolution, ok := outResolutionMap[htlcOp]
4✔
2553
                                if !ok {
4✔
2554
                                        log.Errorf("ChannelArbitrator(%v) "+
×
2555
                                                "unable to find outgoing "+
×
2556
                                                "resolution: %v",
×
2557
                                                c.cfg.ChanPoint, htlcOp)
×
2558

×
2559
                                        continue
×
2560
                                }
2561

2562
                                resolver := newOutgoingContestResolver(
4✔
2563
                                        resolution, height, htlc, resolverCfg,
4✔
2564
                                )
4✔
2565
                                if chanState != nil {
8✔
2566
                                        resolver.SupplementState(chanState)
4✔
2567
                                }
4✔
2568

2569
                                // For outgoing HTLCs, we will also need to
2570
                                // supplement the resolver with the expiry
2571
                                // block height of its corresponding incoming
2572
                                // HTLC.
2573
                                deadline := c.cfg.FindOutgoingHTLCDeadline(htlc)
4✔
2574
                                resolver.SupplementDeadline(deadline)
4✔
2575

4✔
2576
                                htlcResolvers = append(htlcResolvers, resolver)
4✔
2577
                        }
2578
                }
2579
        }
2580

2581
        // If this was a unilateral closure, then we'll also create a
2582
        // resolver to sweep our commitment output (but only if it wasn't
2583
        // trimmed).
2584
        if contractResolutions.CommitResolution != nil {
16✔
2585
                resolver := newCommitSweepResolver(
3✔
2586
                        *contractResolutions.CommitResolution, height,
3✔
2587
                        c.cfg.ChanPoint, resolverCfg,
3✔
2588
                )
3✔
2589
                if chanState != nil {
6✔
2590
                        resolver.SupplementState(chanState)
3✔
2591
                }
3✔
2592
                htlcResolvers = append(htlcResolvers, resolver)
3✔
2593
        }
2594

2595
        return htlcResolvers, nil
13✔
2596
}
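
// exampleIndexByOutPoint is an illustrative sketch (not part of lnd) of the
// lookup maps built in prepContractResolutions above: resolutions are keyed
// by the HTLC outpoint (commit txid plus output index) so each HTLC pulled
// from the chain action map can find its resolution in constant time. The
// string value is a hypothetical placeholder for the real resolution types.
func exampleIndexByOutPoint(commitHash chainhash.Hash,
        outputIndexes []uint32) map[wire.OutPoint]string {

        resolutions := make(map[wire.OutPoint]string, len(outputIndexes))
        for _, idx := range outputIndexes {
                op := wire.OutPoint{
                        Hash:  commitHash,
                        Index: idx,
                }
                resolutions[op] = fmt.Sprintf("resolution for %v", op)
        }

        return resolutions
}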
2597

2598
// replaceResolver replaces a resolver in the list of active resolvers. If the
2599
// resolver to be replaced is not found, it returns an error.
2600
func (c *ChannelArbitrator) replaceResolver(oldResolver,
2601
        newResolver ContractResolver) error {
4✔
2602

4✔
2603
        c.activeResolversLock.Lock()
4✔
2604
        defer c.activeResolversLock.Unlock()
4✔
2605

4✔
2606
        oldKey := oldResolver.ResolverKey()
4✔
2607
        for i, r := range c.activeResolvers {
8✔
2608
                if bytes.Equal(r.ResolverKey(), oldKey) {
8✔
2609
                        c.activeResolvers[i] = newResolver
4✔
2610
                        return nil
4✔
2611
                }
4✔
2612
        }
2613

2614
        return errors.New("resolver to be replaced not found")
×
2615
}
2616

2617
// resolveContract is a goroutine tasked with fully resolving an unresolved
2618
// contract. Either the initial contract will be resolved after a single step,
2619
// or the contract will itself create another contract to be resolved. In
2620
// either case, once the contract has been fully resolved, we'll signal back to
2621
// the main goroutine so it can properly keep track of the set of unresolved
2622
// contracts.
2623
//
2624
// NOTE: This MUST be run as a goroutine.
2625
func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver) {
9✔
2626
        defer c.wg.Done()
9✔
2627

9✔
2628
        log.Tracef("ChannelArbitrator(%v): attempting to resolve %T",
9✔
2629
                c.cfg.ChanPoint, currentContract)
9✔
2630

9✔
2631
        // Until the contract is fully resolved, we'll continue to iteratively
9✔
2632
        // resolve the contract one step at a time.
9✔
2633
        for !currentContract.IsResolved() {
19✔
2634
                log.Tracef("ChannelArbitrator(%v): contract %T not yet "+
10✔
2635
                        "resolved", c.cfg.ChanPoint, currentContract)
10✔
2636

10✔
2637
                select {
10✔
2638

2639
                // If we've been signalled to quit, then we'll exit early.
2640
                case <-c.quit:
×
2641
                        return
×
2642

2643
                default:
10✔
2644
                        // Otherwise, we'll attempt to resolve the current
10✔
2645
                        // contract.
10✔
2646
                        nextContract, err := currentContract.Resolve()
10✔
2647
                        if err != nil {
14✔
2648
                                if err == errResolverShuttingDown {
8✔
2649
                                        return
4✔
2650
                                }
4✔
2651

2652
                                log.Errorf("ChannelArbitrator(%v): unable to "+
3✔
2653
                                        "progress %T: %v",
3✔
2654
                                        c.cfg.ChanPoint, currentContract, err)
3✔
2655
                                return
3✔
2656
                        }
2657

2658
                        switch {
9✔
2659
                        // If this contract produced another, then this means
2660
                        // the current contract was only able to be partially
2661
                        // resolved in this step. So we'll do a contract swap
2662
                        // within our logs: the new contract will take the
2663
                        // place of the old one.
2664
                        case nextContract != nil:
4✔
2665
                                log.Debugf("ChannelArbitrator(%v): swapping "+
4✔
2666
                                        "out contract %T for %T ",
4✔
2667
                                        c.cfg.ChanPoint, currentContract,
4✔
2668
                                        nextContract)
4✔
2669

4✔
2670
                                // Swap contract in log.
4✔
2671
                                err := c.log.SwapContract(
4✔
2672
                                        currentContract, nextContract,
4✔
2673
                                )
4✔
2674
                                if err != nil {
4✔
2675
                                        log.Errorf("unable to add recurse "+
×
2676
                                                "contract: %v", err)
×
2677
                                }
×
2678

2679
                                // Swap contract in resolvers list. This is to
2680
                                // make sure that reports are queried from the
2681
                                // new resolver.
2682
                                err = c.replaceResolver(
4✔
2683
                                        currentContract, nextContract,
4✔
2684
                                )
4✔
2685
                                if err != nil {
4✔
2686
                                        log.Errorf("unable to replace "+
×
2687
                                                "contract: %v", err)
×
2688
                                }
×
2689

2690
                                // As this contract produced another, we'll
2691
                                // re-assign, so we can continue our resolution
2692
                                // loop.
2693
                                currentContract = nextContract
4✔
2694

4✔
2695
                                // Launch the new contract.
4✔
2696
                                err = currentContract.Launch()
4✔
2697
                                if err != nil {
4✔
2698
                                        log.Errorf("Failed to launch %T: %v",
×
2699
                                                currentContract, err)
×
2700
                                }
×
2701

2702
                        // If this contract is actually fully resolved, then
2703
                        // we'll mark it as such within the database.
2704
                        case currentContract.IsResolved():
8✔
2705
                                log.Debugf("ChannelArbitrator(%v): marking "+
8✔
2706
                                        "contract %T fully resolved",
8✔
2707
                                        c.cfg.ChanPoint, currentContract)
8✔
2708

8✔
2709
                                err := c.log.ResolveContract(currentContract)
8✔
2710
                                if err != nil {
8✔
2711
                                        log.Errorf("unable to resolve contract: %v",
×
2712
                                                err)
×
2713
                                }
×
2714

2715
                                // Now that the contract has been resolved,
2716
                                // we'll signal to the main goroutine.
2717
                                select {
8✔
2718
                                case c.resolutionSignal <- struct{}{}:
7✔
2719
                                case <-c.quit:
4✔
2720
                                        return
4✔
2721
                                }
2722
                        }
2723

2724
                }
2725
        }
2726
}
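
// The sketch below (not part of lnd) shows the iterative shape of
// resolveContract above: a resolver is driven one step at a time, each step
// may hand back a successor that replaces it, and the loop stops once
// resolution completes or the quit channel fires. exampleResolver is a
// hypothetical stand-in for the ContractResolver interface.
type exampleResolver interface {
        Resolve() (exampleResolver, error)
        IsResolved() bool
}

func exampleResolveLoop(r exampleResolver, quit <-chan struct{}) error {
        for !r.IsResolved() {
                // Bail out early on shutdown.
                select {
                case <-quit:
                        return errors.New("shutting down")
                default:
                }

                next, err := r.Resolve()
                if err != nil {
                        return err
                }

                // A non-nil successor takes the place of the current
                // resolver for the next iteration.
                if next != nil {
                        r = next
                }
        }

        return nil
}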
2727

2728
// signalUpdateMsg is a struct that carries fresh signals to the
2729
// ChannelArbitrator. We need to receive a message like this each time the
2730
// channel becomes active, as its internal state may change.
2731
type signalUpdateMsg struct {
2732
        // newSignals is the set of new active signals to be sent to the
2733
        // arbitrator.
2734
        newSignals *ContractSignals
2735

2736
        // doneChan is a channel that will be closed once the arbitrator has
2737
        // attached the new signals.
2738
        doneChan chan struct{}
2739
}
2740

2741
// UpdateContractSignals updates the set of signals the ChannelArbitrator needs
2742
// to receive from a channel in real-time in order to keep in sync with the
2743
// latest state of the contract.
2744
func (c *ChannelArbitrator) UpdateContractSignals(newSignals *ContractSignals) {
14✔
2745
        done := make(chan struct{})
14✔
2746

14✔
2747
        select {
14✔
2748
        case c.signalUpdates <- &signalUpdateMsg{
2749
                newSignals: newSignals,
2750
                doneChan:   done,
2751
        }:
14✔
2752
        case <-c.quit:
×
2753
        }
2754

2755
        select {
14✔
2756
        case <-done:
14✔
2757
        case <-c.quit:
×
2758
        }
2759
}
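
// exampleSendAndWait is an illustrative sketch (not part of lnd) of the
// handshake used by UpdateContractSignals above: the caller sends a request
// that carries a done channel, then blocks until the processing goroutine
// closes it, and both steps are guarded by a quit channel so shutdown can
// never deadlock the caller. The channel types here are hypothetical.
func exampleSendAndWait(reqs chan<- chan struct{}, quit <-chan struct{}) {
        done := make(chan struct{})

        // Hand the request to the processing goroutine.
        select {
        case reqs <- done:
        case <-quit:
                return
        }

        // Wait until the request has been applied.
        select {
        case <-done:
        case <-quit:
        }
}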
2760

2761
// notifyContractUpdate updates the ChannelArbitrator's unmerged mappings such
2762
// that it can later be merged with activeHTLCs when calling
2763
// checkLocalChainActions or sweepAnchors. These are the only two places that
2764
// activeHTLCs is used.
2765
func (c *ChannelArbitrator) notifyContractUpdate(upd *ContractUpdate) {
15✔
2766
        c.unmergedMtx.Lock()
15✔
2767
        defer c.unmergedMtx.Unlock()
15✔
2768

15✔
2769
        // Update the mapping.
15✔
2770
        c.unmergedSet[upd.HtlcKey] = newHtlcSet(upd.Htlcs)
15✔
2771

15✔
2772
        log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v",
15✔
2773
                c.cfg.ChanPoint, lnutils.SpewLogClosure(upd))
15✔
2774
}
15✔
2775

2776
// updateActiveHTLCs merges the unmerged set of HTLCs from the link with
2777
// activeHTLCs.
2778
func (c *ChannelArbitrator) updateActiveHTLCs() {
76✔
2779
        c.unmergedMtx.RLock()
76✔
2780
        defer c.unmergedMtx.RUnlock()
76✔
2781

76✔
2782
        // Update the mapping.
76✔
2783
        c.activeHTLCs[LocalHtlcSet] = c.unmergedSet[LocalHtlcSet]
76✔
2784
        c.activeHTLCs[RemoteHtlcSet] = c.unmergedSet[RemoteHtlcSet]
76✔
2785

76✔
2786
        // If the pending set exists, update that as well.
76✔
2787
        if _, ok := c.unmergedSet[RemotePendingHtlcSet]; ok {
88✔
2788
                pendingSet := c.unmergedSet[RemotePendingHtlcSet]
12✔
2789
                c.activeHTLCs[RemotePendingHtlcSet] = pendingSet
12✔
2790
        }
12✔
2791
}
2792

2793
// channelAttendant is the primary goroutine that acts at the judicial
2794
// arbitrator between our channel state, the remote channel peer, and the
2795
// blockchain (Our judge). This goroutine will ensure that we faithfully execute
2796
// all clauses of our contract in the case that we need to go on-chain for a
2797
// dispute. Currently, two such conditions warrant our intervention: when an
2798
// outgoing HTLC is about to timeout, and when we know the pre-image for an
2799
// incoming HTLC, but it hasn't yet been settled off-chain. In these cases,
2800
// we'll: broadcast our commitment, cancel/settle any HTLCs backwards after
2801
// sufficient confirmation, and finally send our set of outputs to the UTXO
2802
// Nursery for incubation, and ultimate sweeping.
2803
//
2804
// NOTE: This MUST be run as a goroutine.
2805
func (c *ChannelArbitrator) channelAttendant(bestHeight int32,
2806
        commitSet *CommitSet) {
51✔
2807

51✔
2808
        // TODO(roasbeef): tell top chain arb we're done
51✔
2809
        defer func() {
99✔
2810
                c.wg.Done()
48✔
2811
        }()
48✔
2812

2813
        err := c.progressStateMachineAfterRestart(bestHeight, commitSet)
51✔
2814
        if err != nil {
52✔
2815
                // In case of an error, we return early but we do not shut down
1✔
2816
                // LND, because there might be other channels that still can be
1✔
2817
                // resolved and we don't want to interfere with that.
1✔
2818
                // We continue to run the channel attendant in case the channel
1✔
2819
                // closes via other means, for example if the remote party force
1✔
2820
                // closes the channel. So we log the error and continue.
1✔
2821
                log.Errorf("Unable to progress state machine after "+
1✔
2822
                        "restart: %v", err)
1✔
2823
        }
1✔
2824

2825
        for {
152✔
2826
                select {
101✔
2827

2828
                // A new block has arrived, we'll examine all the active HTLC's
2829
                // to see if any of them have expired, and also update our
2830
                // track of the best current height.
2831
                case beat := <-c.BlockbeatChan:
9✔
2832
                        bestHeight = beat.Height()
9✔
2833

9✔
2834
                        log.Debugf("ChannelArbitrator(%v): received new "+
9✔
2835
                                "block: height=%v, processing...",
9✔
2836
                                c.cfg.ChanPoint, bestHeight)
9✔
2837

9✔
2838
                        err := c.handleBlockbeat(beat)
9✔
2839
                        if err != nil {
9✔
2840
                                log.Errorf("Handle block=%v got err: %v",
×
2841
                                        bestHeight, err)
×
2842
                        }
×
2843

2844
                        // If as a result of this trigger, the contract is
2845
                        // fully resolved, then we'll exit.
2846
                        if c.state == StateFullyResolved {
12✔
2847
                                return
3✔
2848
                        }
3✔
2849

2850
                // A new signal update was just sent. This indicates that the
2851
                // channel under watch is now live, and may modify its internal
2852
                // state, so we'll get the most up to date signals so we can
2853
                // properly do our job.
2854
                case signalUpdate := <-c.signalUpdates:
14✔
2855
                        log.Tracef("ChannelArbitrator(%v): got new signal "+
14✔
2856
                                "update!", c.cfg.ChanPoint)
14✔
2857

14✔
2858
                        // We'll update the ShortChannelID.
14✔
2859
                        c.cfg.ShortChanID = signalUpdate.newSignals.ShortChanID
14✔
2860

14✔
2861
                        // Now that the signal has been updated, we'll now
14✔
2862
                        // close the done channel to signal to the caller we've
14✔
2863
                        // registered the new ShortChannelID.
14✔
2864
                        close(signalUpdate.doneChan)
14✔
2865

2866
                // We've cooperatively closed the channel, so we're no longer
2867
                // needed. We'll mark the channel as resolved and exit.
2868
                case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
5✔
2869
                        err := c.handleCoopCloseEvent(closeInfo)
5✔
2870
                        if err != nil {
5✔
2871
                                log.Errorf("Failed to handle coop close: %v",
×
2872
                                        err)
×
2873

×
2874
                                return
×
2875
                        }
×
2876

2877
                // We have broadcasted our commitment, and it is now confirmed
2878
                // on-chain.
2879
                case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
15✔
2880
                        if c.state != StateCommitmentBroadcasted {
19✔
2881
                                log.Errorf("ChannelArbitrator(%v): unexpected "+
4✔
2882
                                        "local on-chain channel close",
4✔
2883
                                        c.cfg.ChanPoint)
4✔
2884
                        }
4✔
2885

2886
                        err := c.handleLocalForceCloseEvent(closeInfo)
15✔
2887
                        if err != nil {
15✔
2888
                                log.Errorf("Failed to handle local force "+
×
2889
                                        "close: %v", err)
×
2890

×
2891
                                return
×
2892
                        }
×
2893

2894
                // The remote party has broadcast the commitment on-chain.
2895
                // We'll examine our state to determine if we need to act at
2896
                // all.
2897
                case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
11✔
2898
                        err := c.handleRemoteForceCloseEvent(uniClosure)
11✔
2899
                        if err != nil {
13✔
2900
                                log.Errorf("Failed to handle remote force "+
2✔
2901
                                        "close: %v", err)
2✔
2902

2✔
2903
                                return
2✔
2904
                        }
2✔
2905

2906
                // The remote has breached the channel. As this is handled by
2907
                // the ChainWatcher and BreachArbitrator, we don't have to do
2908
                // anything in particular, so just advance our state and
2909
                // gracefully exit.
2910
                case breachInfo := <-c.cfg.ChainEvents.ContractBreach:
4✔
2911
                        err := c.handleContractBreach(breachInfo)
4✔
2912
                        if err != nil {
4✔
2913
                                log.Errorf("Failed to handle contract breach: "+
×
2914
                                        "%v", err)
×
2915

×
2916
                                return
×
2917
                        }
×
2918

2919
                // A new contract has just been resolved, we'll now check our
2920
                // log to see if all contracts have been resolved. If so, then
2921
                // we can exit as the contract is fully resolved.
2922
                case <-c.resolutionSignal:
7✔
2923
                        log.Infof("ChannelArbitrator(%v): a contract has been "+
7✔
2924
                                "fully resolved!", c.cfg.ChanPoint)
7✔
2925

7✔
2926
                        nextState, _, err := c.advanceState(
7✔
2927
                                uint32(bestHeight), chainTrigger, nil,
7✔
2928
                        )
7✔
2929
                        if err != nil {
7✔
2930
                                log.Errorf("Unable to advance state: %v", err)
×
2931
                        }
×
2932

2933
                        // If we don't have anything further to do after
2934
                        // advancing our state, then we'll exit.
2935
                        if nextState == StateFullyResolved {
13✔
2936
                                log.Infof("ChannelArbitrator(%v): all "+
6✔
2937
                                        "contracts fully resolved, exiting",
6✔
2938
                                        c.cfg.ChanPoint)
6✔
2939

6✔
2940
                                return
6✔
2941
                        }
6✔
2942

2943
                // We've just received a request to forcibly close out the
2944
                // channel. We'll attempt to advance our state machine with
                // the user-initiated trigger.
2945
                case closeReq := <-c.forceCloseReqs:
14✔
2946
                        log.Infof("ChannelArbitrator(%v): received force "+
14✔
2947
                                "close request", c.cfg.ChanPoint)
14✔
2948

14✔
2949
                        if c.state != StateDefault {
16✔
2950
                                select {
2✔
2951
                                case closeReq.closeTx <- nil:
2✔
2952
                                case <-c.quit:
×
2953
                                }
2954

2955
                                select {
2✔
2956
                                case closeReq.errResp <- errAlreadyForceClosed:
2✔
2957
                                case <-c.quit:
×
2958
                                }
2959

2960
                                continue
2✔
2961
                        }
2962

2963
                        nextState, closeTx, err := c.advanceState(
13✔
2964
                                uint32(bestHeight), userTrigger, nil,
13✔
2965
                        )
13✔
2966
                        if err != nil {
17✔
2967
                                log.Errorf("Unable to advance state: %v", err)
4✔
2968
                        }
4✔
2969

2970
                        select {
13✔
2971
                        case closeReq.closeTx <- closeTx:
13✔
2972
                        case <-c.quit:
×
2973
                                return
×
2974
                        }
2975

2976
                        select {
13✔
2977
                        case closeReq.errResp <- err:
13✔
2978
                        case <-c.quit:
×
2979
                                return
×
2980
                        }
2981

2982
                        // If we don't have anything further to do after
2983
                        // advancing our state, then we'll exit.
2984
                        if nextState == StateFullyResolved {
13✔
2985
                                log.Infof("ChannelArbitrator(%v): all "+
×
2986
                                        "contracts resolved, exiting",
×
2987
                                        c.cfg.ChanPoint)
×
2988
                                return
×
2989
                        }
×
2990

2991
                case <-c.quit:
43✔
2992
                        return
43✔
2993
                }
2994
        }
2995
}
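
// exampleEventLoop is an illustrative sketch (not part of lnd) of the overall
// shape of channelAttendant above: one goroutine owns the state and selects
// over its event channels plus a quit channel, which serializes every state
// transition without additional locking. The channel names are hypothetical.
func exampleEventLoop(blocks <-chan int32, closes <-chan struct{},
        quit <-chan struct{}) {

        var bestHeight int32

        for {
                select {
                // A new block arrived, update our view of the best height.
                case height := <-blocks:
                        bestHeight = height
                        fmt.Printf("processing block height=%d\n", bestHeight)

                // A close event terminates the loop once it is handled.
                case <-closes:
                        return

                // Shutdown requested.
                case <-quit:
                        return
                }
        }
}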
2996

2997
// handleBlockbeat processes a newly received blockbeat by advancing the
2998
// arbitrator's internal state using the received block height.
2999
func (c *ChannelArbitrator) handleBlockbeat(beat chainio.Blockbeat) error {
9✔
3000
        // Notify we've processed the block.
9✔
3001
        defer c.NotifyBlockProcessed(beat, nil)
9✔
3002

9✔
3003
        // If the state is StateContractClosed, StateWaitingFullResolution, or
9✔
3004
        // StateFullyResolved, there's no need to read the close event channel
9✔
3005
        // since the arbitrator can only get to this state after processing a
9✔
3006
        // previous close event and launched all its resolvers.
9✔
3007
        if c.state.IsContractClosed() {
12✔
3008
                log.Infof("ChannelArbitrator(%v): skipping reading close "+
3✔
3009
                        "events in state=%v", c.cfg.ChanPoint, c.state)
3✔
3010

3✔
3011
                // Launch all active resolvers when a new blockbeat is
3✔
3012
                // received, even when the contract is closed, we still need
3✔
3013
                // this as the resolvers may transform into new ones. For
3✔
3014
                // already launched resolvers this will be NOOP as they track
3✔
3015
                // their own `launched` states.
3✔
3016
                c.launchResolvers()
3✔
3017

3✔
3018
                return nil
3✔
3019
        }
3✔
3020

3021
        // Perform a non-blocking read on the close events in case the channel
3022
        // is closed in this blockbeat.
3023
        c.receiveAndProcessCloseEvent()
9✔
3024

9✔
3025
        // Try to advance the state if we are in StateDefault.
9✔
3026
        if c.state == StateDefault {
17✔
3027
                // Now that a new block has arrived, we'll attempt to advance
8✔
3028
                // our state forward.
8✔
3029
                _, _, err := c.advanceState(
8✔
3030
                        uint32(beat.Height()), chainTrigger, nil,
8✔
3031
                )
8✔
3032
                if err != nil {
8✔
3033
                        return fmt.Errorf("unable to advance state: %w", err)
×
3034
                }
×
3035
        }
3036

3037
        // Launch all active resolvers when a new blockbeat is received.
3038
        c.launchResolvers()
9✔
3039

9✔
3040
        return nil
9✔
3041
}
3042

3043
// receiveAndProcessCloseEvent does a non-blocking read on all the channel
3044
// close event channels. If an event is received, it will be further processed.
3045
func (c *ChannelArbitrator) receiveAndProcessCloseEvent() {
9✔
3046
        select {
9✔
3047
        // Received a coop close event, we now mark the channel as resolved and
3048
        // exit.
3049
        case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
×
3050
                err := c.handleCoopCloseEvent(closeInfo)
×
3051
                if err != nil {
×
3052
                        log.Errorf("Failed to handle coop close: %v", err)
×
3053
                        return
×
3054
                }
×
3055

3056
        // We have broadcast our commitment, and it is now confirmed onchain.
3057
        case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
1✔
3058
                if c.state != StateCommitmentBroadcasted {
1✔
3059
                        log.Errorf("ChannelArbitrator(%v): unexpected "+
×
3060
                                "local on-chain channel close", c.cfg.ChanPoint)
×
3061
                }
×
3062

3063
                err := c.handleLocalForceCloseEvent(closeInfo)
1✔
3064
                if err != nil {
1✔
3065
                        log.Errorf("Failed to handle local force close: %v",
×
3066
                                err)
×
3067

×
3068
                        return
×
3069
                }
×
3070

3071
        // The remote party has broadcast the commitment. We'll examine our
3072
        // state to determine if we need to act at all.
3073
        case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
×
3074
                err := c.handleRemoteForceCloseEvent(uniClosure)
×
3075
                if err != nil {
×
3076
                        log.Errorf("Failed to handle remote force close: %v",
×
3077
                                err)
×
3078

×
3079
                        return
×
3080
                }
×
3081

3082
        // The remote has breached the channel! We now launch the breach
3083
        // contract resolvers.
3084
        case breachInfo := <-c.cfg.ChainEvents.ContractBreach:
×
3085
                err := c.handleContractBreach(breachInfo)
×
3086
                if err != nil {
×
3087
                        log.Errorf("Failed to handle contract breach: %v", err)
×
3088
                        return
×
3089
                }
×
3090

3091
        default:
9✔
3092
                log.Infof("ChannelArbitrator(%v) no close event",
9✔
3093
                        c.cfg.ChanPoint)
9✔
3094
        }
3095
}
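
// exampleNonBlockingRead is an illustrative sketch (not part of lnd) of the
// pattern used by receiveAndProcessCloseEvent above: a select statement with
// a default branch drains at most one pending event without ever blocking
// the caller, so the blockbeat handler keeps making progress even when no
// close event is queued.
func exampleNonBlockingRead(events <-chan string) (string, bool) {
        select {
        case event := <-events:
                return event, true

        default:
                return "", false
        }
}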
3096

3097
// Name returns a human-readable string for this subsystem.
3098
//
3099
// NOTE: Part of chainio.Consumer interface.
3100
func (c *ChannelArbitrator) Name() string {
54✔
3101
        return fmt.Sprintf("ChannelArbitrator(%v)", c.cfg.ChanPoint)
54✔
3102
}
54✔
3103

3104
// checkLegacyBreach returns StateFullyResolved if the channel was closed with
3105
// a breach transaction before the channel arbitrator launched its own breach
3106
// resolver. StateContractClosed is returned if this is a modern breach close
3107
// with a breach resolver. StateError is returned if the log lookup failed.
3108
func (c *ChannelArbitrator) checkLegacyBreach() (ArbitratorState, error) {
5✔
3109
        // A previous version of the channel arbitrator would make the breach
5✔
3110
        // close skip to StateFullyResolved. If there are no contract
5✔
3111
        // resolutions in the bolt arbitrator log, then this is an older breach
5✔
3112
        // close. Otherwise, if there are resolutions, the state should advance
5✔
3113
        // to StateContractClosed.
5✔
3114
        _, err := c.log.FetchContractResolutions()
5✔
3115
        if err == errNoResolutions {
5✔
3116
                // This is an older breach close still in the database.
×
3117
                return StateFullyResolved, nil
×
3118
        } else if err != nil {
5✔
3119
                return StateError, err
×
3120
        }
×
3121

3122
        // This is a modern breach close with resolvers.
3123
        return StateContractClosed, nil
5✔
3124
}
3125

3126
// sweepRequest wraps the arguments used when calling `SweepInput`.
3127
type sweepRequest struct {
3128
        // input is the input to be swept.
3129
        input input.Input
3130

3131
        // params holds the sweeping parameters.
3132
        params sweep.Params
3133
}
3134

3135
// createSweepRequest creates an anchor sweeping request for a particular
3136
// version (local/remote/remote pending) of the commitment.
3137
func (c *ChannelArbitrator) createSweepRequest(
3138
        anchor *lnwallet.AnchorResolution, htlcs htlcSet, anchorPath string,
3139
        heightHint uint32) (sweepRequest, error) {
11✔
3140

11✔
3141
        // Use the chan id as the exclusive group. This prevents any of the
11✔
3142
        // anchors from being batched together.
11✔
3143
        exclusiveGroup := c.cfg.ShortChanID.ToUint64()
11✔
3144

11✔
3145
        // Find the deadline for this specific anchor.
11✔
3146
        deadline, value, err := c.findCommitmentDeadlineAndValue(
11✔
3147
                heightHint, htlcs,
11✔
3148
        )
11✔
3149
        if err != nil {
11✔
3150
                return sweepRequest{}, err
×
3151
        }
×
3152

3153
        // If we cannot find a deadline, it means there are no HTLCs at stake,
3154
        // which means we can relax our anchor sweeping conditions as we don't
3155
        // have any time-sensitive outputs to sweep. However, we need to
3156
        // register the anchor output with the sweeper so we are later able to
3157
        // bump the close fee.
3158
        if deadline.IsNone() {
17✔
3159
                log.Infof("ChannelArbitrator(%v): no HTLCs at stake, "+
6✔
3160
                        "sweeping anchor with default deadline",
6✔
3161
                        c.cfg.ChanPoint)
6✔
3162
        }
6✔
3163

3164
        witnessType := input.CommitmentAnchor
11✔
3165

11✔
3166
        // For taproot channels, we need to use the proper witness type.
11✔
3167
        if txscript.IsPayToTaproot(
11✔
3168
                anchor.AnchorSignDescriptor.Output.PkScript,
11✔
3169
        ) {
14✔
3170

3✔
3171
                witnessType = input.TaprootAnchorSweepSpend
3✔
3172
        }
3✔
3173

3174
        // Prepare anchor output for sweeping.
3175
        anchorInput := input.MakeBaseInput(
11✔
3176
                &anchor.CommitAnchor,
11✔
3177
                witnessType,
11✔
3178
                &anchor.AnchorSignDescriptor,
11✔
3179
                heightHint,
11✔
3180
                &input.TxInfo{
11✔
3181
                        Fee:    anchor.CommitFee,
11✔
3182
                        Weight: anchor.CommitWeight,
11✔
3183
                },
11✔
3184
        )
11✔
3185

11✔
3186
        // If we have a deadline, we'll use it to calculate the deadline
11✔
3187
        // height, otherwise default to none.
11✔
3188
        deadlineDesc := "None"
11✔
3189
        deadlineHeight := fn.MapOption(func(d int32) int32 {
19✔
3190
                deadlineDesc = fmt.Sprintf("%d", d)
8✔
3191

8✔
3192
                return d + int32(heightHint)
8✔
3193
        })(deadline)
8✔
3194

3195
        // Calculate the budget based on the value under protection, which is
3196
        // the sum of all HTLCs on this commitment subtracted by their budgets.
3197
        // The anchor output in itself has a small output value of 330 sats so
3198
        // we also include it in the budget to pay for the cpfp transaction.
3199
        budget := calculateBudget(
11✔
3200
                value, c.cfg.Budget.AnchorCPFPRatio, c.cfg.Budget.AnchorCPFP,
11✔
3201
        ) + AnchorOutputValue
11✔
3202

11✔
3203
        log.Infof("ChannelArbitrator(%v): offering anchor from %s commitment "+
11✔
3204
                "%v to sweeper with deadline=%v, budget=%v", c.cfg.ChanPoint,
11✔
3205
                anchorPath, anchor.CommitAnchor, deadlineDesc, budget)
11✔
3206

11✔
3207
        // Sweep anchor output with a confirmation target fee preference.
11✔
3208
        // Because this is a cpfp-operation, the anchor will only be attempted
11✔
3209
        // to sweep when the current fee estimate for the confirmation target
11✔
3210
        // exceeds the commit fee rate.
11✔
3211
        return sweepRequest{
11✔
3212
                input: &anchorInput,
11✔
3213
                params: sweep.Params{
11✔
3214
                        ExclusiveGroup: &exclusiveGroup,
11✔
3215
                        Budget:         budget,
11✔
3216
                        DeadlineHeight: deadlineHeight,
11✔
3217
                },
11✔
3218
        }, nil
11✔
3219
}
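
// exampleAnchorBudget is an illustrative sketch written for this review; it
// is NOT lnd's calculateBudget and its ratio/cap rule is a simplified,
// hypothetical one. It only shows the spirit of the arithmetic above: derive
// a CPFP budget from the value under protection, add the 330-sat anchor
// output itself, and turn a relative deadline into an absolute height by
// adding the height hint.
func exampleAnchorBudget(valueUnderProtection btcutil.Amount, ratio float64,
        maxBudget btcutil.Amount, heightHint, relativeDeadline int32,
) (btcutil.Amount, int32) {

        // Take a share of the protected value, capped at a maximum.
        budget := btcutil.Amount(float64(valueUnderProtection) * ratio)
        if budget > maxBudget {
                budget = maxBudget
        }

        // Include the anchor output value so the CPFP tx can also spend it.
        budget += AnchorOutputValue

        // The deadline handed to the sweeper is an absolute block height.
        deadlineHeight := heightHint + relativeDeadline

        return budget, deadlineHeight
}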
3220

3221
// prepareAnchorSweeps creates a list of requests to be used by the sweeper for
3222
// all possible commitment versions.
3223
func (c *ChannelArbitrator) prepareAnchorSweeps(heightHint uint32,
3224
        anchors *lnwallet.AnchorResolutions) ([]sweepRequest, error) {
22✔
3225

22✔
3226
        // requests holds all the possible anchor sweep requests. We can have
22✔
3227
        // up to 3 different versions of commitments (local/remote/remote
22✔
3228
        // dangling) to be CPFPed by the anchors.
22✔
3229
        requests := make([]sweepRequest, 0, 3)
22✔
3230

22✔
3231
        // remotePendingReq holds the request for sweeping the anchor output on
22✔
3232
        // the remote pending commitment. It's only set when there's an actual
22✔
3233
        // pending remote commitment and it's used to decide whether we need to
22✔
3234
        // update the fee budget when sweeping the anchor output on the local
22✔
3235
        // commitment.
22✔
3236
        remotePendingReq := fn.None[sweepRequest]()
22✔
3237

22✔
3238
        // First we check on the remote pending commitment and optionally
22✔
3239
        // create an anchor sweeping request.
22✔
3240
        htlcs, ok := c.activeHTLCs[RemotePendingHtlcSet]
22✔
3241
        if ok && anchors.RemotePending != nil {
24✔
3242
                req, err := c.createSweepRequest(
2✔
3243
                        anchors.RemotePending, htlcs, "remote pending",
2✔
3244
                        heightHint,
2✔
3245
                )
2✔
3246
                if err != nil {
2✔
3247
                        return nil, err
×
3248
                }
×
3249

3250
                // Save the request.
3251
                requests = append(requests, req)
2✔
3252

2✔
3253
                // Set the optional variable.
2✔
3254
                remotePendingReq = fn.Some(req)
2✔
3255
        }
3256

3257
        // Check the local commitment and optionally create an anchor sweeping
3258
        // request. The params used in this request will be influenced by the
3259
        // anchor sweeping request made from the pending remote commitment.
3260
        htlcs, ok = c.activeHTLCs[LocalHtlcSet]
22✔
3261
        if ok && anchors.Local != nil {
28✔
3262
                req, err := c.createSweepRequest(
6✔
3263
                        anchors.Local, htlcs, "local", heightHint,
6✔
3264
                )
6✔
3265
                if err != nil {
6✔
3266
                        return nil, err
×
3267
                }
×
3268

3269
                // If there's an anchor sweeping request from the pending
3270
                // remote commitment, we will compare its budget against the
3271
                // budget used here and choose the params that has a larger
3272
                // budget. The deadline when choosing the remote pending budget
3273
                // instead of the local one will always be earlier or equal to
3274
                // the local deadline because outgoing HTLCs are resolved on
3275
                // the local commitment first before they are removed from the
3276
                // remote one.
3277
                remotePendingReq.WhenSome(func(s sweepRequest) {
8✔
3278
                        if s.params.Budget <= req.params.Budget {
3✔
3279
                                return
1✔
3280
                        }
1✔
3281

3282
                        log.Infof("ChannelArbitrator(%v): replaced local "+
1✔
3283
                                "anchor(%v) sweep params with pending remote "+
1✔
3284
                                "anchor sweep params, \nold:[%v], \nnew:[%v]",
1✔
3285
                                c.cfg.ChanPoint, anchors.Local.CommitAnchor,
1✔
3286
                                req.params, s.params)
1✔
3287

1✔
3288
                        req.params = s.params
1✔
3289
                })
3290

3291
                // Save the request.
3292
                requests = append(requests, req)
6✔
3293
        }
3294

3295
        // Check the remote commitment and create an anchor sweeping request if
3296
        // needed.
3297
        htlcs, ok = c.activeHTLCs[RemoteHtlcSet]
22✔
3298
        if ok && anchors.Remote != nil {
28✔
3299
                req, err := c.createSweepRequest(
6✔
3300
                        anchors.Remote, htlcs, "remote", heightHint,
6✔
3301
                )
6✔
3302
                if err != nil {
6✔
3303
                        return nil, err
×
3304
                }
×
3305

3306
                requests = append(requests, req)
6✔
3307
        }
3308

3309
        return requests, nil
22✔
3310
}
3311

3312
// failIncomingDust resolves the incoming dust HTLCs because they do not have
3313
// an output on the commitment transaction and cannot be resolved onchain. We
3314
// mark them as failed here.
3315
func (c *ChannelArbitrator) failIncomingDust(
3316
        incomingDustHTLCs []channeldb.HTLC) error {
13✔
3317

13✔
3318
        for _, htlc := range incomingDustHTLCs {
17✔
3319
                if !htlc.Incoming || htlc.OutputIndex >= 0 {
4✔
3320
                        return fmt.Errorf("htlc with index %v is not incoming "+
×
3321
                                "dust", htlc.OutputIndex)
×
3322
                }
×
3323

3324
                key := models.CircuitKey{
4✔
3325
                        ChanID: c.cfg.ShortChanID,
4✔
3326
                        HtlcID: htlc.HtlcIndex,
4✔
3327
                }
4✔
3328

4✔
3329
                // Mark this dust htlc as final failed.
4✔
3330
                chainArbCfg := c.cfg.ChainArbitratorConfig
4✔
3331
                err := chainArbCfg.PutFinalHtlcOutcome(
4✔
3332
                        key.ChanID, key.HtlcID, false,
4✔
3333
                )
4✔
3334
                if err != nil {
4✔
3335
                        return err
×
3336
                }
×
3337

3338
                // Send notification.
3339
                chainArbCfg.HtlcNotifier.NotifyFinalHtlcEvent(
4✔
3340
                        key,
4✔
3341
                        channeldb.FinalHtlcInfo{
4✔
3342
                                Settled:  false,
4✔
3343
                                Offchain: false,
4✔
3344
                        },
4✔
3345
                )
4✔
3346
        }
3347

3348
        return nil
13✔
3349
}
3350

3351
// abandonForwards cancels back the incoming HTLCs for their corresponding
3352
// outgoing HTLCs. We use a set here to avoid sending duplicate failure messages
3353
// for the same HTLC. This also needs to be done for locally initiated outgoing
3354
// HTLCs, as they are special cased in the switch.
3355
func (c *ChannelArbitrator) abandonForwards(htlcs fn.Set[uint64]) error {
16✔
3356
        log.Debugf("ChannelArbitrator(%v): cancelling back %v incoming "+
16✔
3357
                "HTLC(s)", c.cfg.ChanPoint,
16✔
3358
                len(htlcs))
16✔
3359

16✔
3360
        msgsToSend := make([]ResolutionMsg, 0, len(htlcs))
16✔
3361
        failureMsg := &lnwire.FailPermanentChannelFailure{}
16✔
3362

16✔
3363
        for idx := range htlcs {
29✔
3364
                failMsg := ResolutionMsg{
13✔
3365
                        SourceChan: c.cfg.ShortChanID,
13✔
3366
                        HtlcIndex:  idx,
13✔
3367
                        Failure:    failureMsg,
13✔
3368
                }
13✔
3369

13✔
3370
                msgsToSend = append(msgsToSend, failMsg)
13✔
3371
        }
13✔
3372

3373
        // Send the messages to the switch, if there are any.
3374
        if len(msgsToSend) == 0 {
22✔
3375
                return nil
6✔
3376
        }
6✔
3377

3378
        log.Debugf("ChannelArbitrator(%v): sending resolution message=%v",
13✔
3379
                c.cfg.ChanPoint, lnutils.SpewLogClosure(msgsToSend))
13✔
3380

13✔
3381
        err := c.cfg.DeliverResolutionMsg(msgsToSend...)
13✔
3382
        if err != nil {
13✔
3383
                log.Errorf("Unable to send resolution msges to switch: %v", err)
×
3384
                return err
×
3385
        }
×
3386

3387
        return nil
13✔
3388
}
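
// exampleDedupIndices is an illustrative sketch (not part of lnd) of the
// set-based deduplication abandonForwards above relies on: collapsing HTLC
// indices into a set guarantees that at most one failure message is produced
// per incoming HTLC, no matter how many times an index shows up.
func exampleDedupIndices(indices []uint64) map[uint64]struct{} {
        set := make(map[uint64]struct{}, len(indices))
        for _, idx := range indices {
                set[idx] = struct{}{}
        }

        return set
}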
3389

3390
// handleCoopCloseEvent takes a coop close event from ChainEvents, marks the
// channel as closed and advances the state.
func (c *ChannelArbitrator) handleCoopCloseEvent(
        closeInfo *CooperativeCloseInfo) error {

        log.Infof("ChannelArbitrator(%v) marking channel cooperatively closed "+
                "at height %v", c.cfg.ChanPoint, closeInfo.CloseHeight)

        err := c.cfg.MarkChannelClosed(
                closeInfo.ChannelCloseSummary,
                channeldb.ChanStatusCoopBroadcasted,
        )
        if err != nil {
                return fmt.Errorf("unable to mark channel closed: %w", err)
        }

        // We'll now advance our state machine until it reaches a terminal
        // state, and the channel is marked resolved.
        _, _, err = c.advanceState(closeInfo.CloseHeight, coopCloseTrigger, nil)
        if err != nil {
                log.Errorf("Unable to advance state: %v", err)
        }

        return nil
}

// handleLocalForceCloseEvent takes a local force close event from ChainEvents,
// saves the contract resolutions to disk, marks the channel as closed, and
// advances the state.
func (c *ChannelArbitrator) handleLocalForceCloseEvent(
        closeInfo *LocalUnilateralCloseInfo) error {

        closeTx := closeInfo.CloseTx

        resolutions, err := closeInfo.ContractResolutions.
                UnwrapOrErr(
                        fmt.Errorf("resolutions not found"),
                )
        if err != nil {
                return fmt.Errorf("unable to get resolutions: %w", err)
        }

        // We make sure that the htlc resolutions are present, otherwise we
        // would panic when dereferencing the pointer.
        //
        // TODO(ziggie): Refactor ContractResolutions to use options.
        if resolutions.HtlcResolutions == nil {
                return fmt.Errorf("htlc resolutions is nil")
        }

        log.Infof("ChannelArbitrator(%v): local force close tx=%v confirmed",
                c.cfg.ChanPoint, closeTx.TxHash())

        contractRes := &ContractResolutions{
                CommitHash:       closeTx.TxHash(),
                CommitResolution: resolutions.CommitResolution,
                HtlcResolutions:  *resolutions.HtlcResolutions,
                AnchorResolution: resolutions.AnchorResolution,
        }

        // When processing a unilateral close event, we'll transition to the
        // ContractClosed state. We'll log the set of resolutions so that they
        // are available to fetch in that state, and we'll also write the
        // commit set so we can reconstruct our chain actions on restart.
        err = c.log.LogContractResolutions(contractRes)
        if err != nil {
                return fmt.Errorf("unable to write resolutions: %w", err)
        }

        err = c.log.InsertConfirmedCommitSet(&closeInfo.CommitSet)
        if err != nil {
                return fmt.Errorf("unable to write commit set: %w", err)
        }

        // After the set of resolutions is successfully logged, we can safely
        // close the channel. After this succeeds we won't be getting chain
        // events anymore, so we must make sure we can recover on restart after
        // it is marked closed. If the next state transition fails, we'll start
        // up in the prior state again and will no longer be getting chain
        // events. In this case we must manually re-trigger the state
        // transition into StateContractClosed based on the close status of the
        // channel.
        err = c.cfg.MarkChannelClosed(
                closeInfo.ChannelCloseSummary,
                channeldb.ChanStatusLocalCloseInitiator,
        )
        if err != nil {
                return fmt.Errorf("unable to mark channel closed: %w", err)
        }

        // We'll now advance our state machine until it reaches a terminal
        // state.
        _, _, err = c.advanceState(
                uint32(closeInfo.SpendingHeight),
                localCloseTrigger, &closeInfo.CommitSet,
        )
        if err != nil {
                log.Errorf("Unable to advance state: %v", err)
        }

        return nil
}

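// The following sketch (not part of the original file; exampleCloseFlow and
// its function-valued parameters are hypothetical stand-ins) condenses the
// ordering that handleLocalForceCloseEvent above, and the remote force close
// and breach handlers below, all share: persist the resolutions and the
// confirmed commit set first, mark the channel closed only once those writes
// succeed, and treat a failed state-machine advance as non-fatal because the
// persisted close data lets the arbitrator resume after a restart.
func exampleCloseFlow(logResolutions, logCommitSet, markClosed,
        advance func() error) error {

        // Everything needed to resolve the channel later must be on disk
        // before we stop receiving chain events for it.
        if err := logResolutions(); err != nil {
                return fmt.Errorf("unable to write resolutions: %w", err)
        }
        if err := logCommitSet(); err != nil {
                return fmt.Errorf("unable to write commit set: %w", err)
        }

        // Only now is it safe to mark the channel closed.
        if err := markClosed(); err != nil {
                return fmt.Errorf("unable to mark channel closed: %w", err)
        }

        // A failed advance is logged rather than returned: the close is
        // already persisted, so the state machine can be re-driven later.
        if err := advance(); err != nil {
                log.Errorf("Unable to advance state: %v", err)
        }

        return nil
}
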
// handleRemoteForceCloseEvent takes a remote force close event from
// ChainEvents, saves the contract resolutions to disk, marks the channel as
// closed, and advances the state.
func (c *ChannelArbitrator) handleRemoteForceCloseEvent(
        closeInfo *RemoteUnilateralCloseInfo) error {

        log.Infof("ChannelArbitrator(%v): remote party has force closed "+
                "channel at height %v", c.cfg.ChanPoint,
                closeInfo.SpendingHeight)

        // If we don't have a self output, and there are no active HTLCs, then
        // we can immediately mark the contract as fully resolved and exit.
        contractRes := &ContractResolutions{
                CommitHash:       *closeInfo.SpenderTxHash,
                CommitResolution: closeInfo.CommitResolution,
                HtlcResolutions:  *closeInfo.HtlcResolutions,
                AnchorResolution: closeInfo.AnchorResolution,
        }

        // When processing a unilateral close event, we'll transition to the
        // ContractClosed state. We'll log the set of resolutions so that they
        // are available to fetch in that state, and we'll also write the
        // commit set so we can reconstruct our chain actions on restart.
        err := c.log.LogContractResolutions(contractRes)
        if err != nil {
                return fmt.Errorf("unable to write resolutions: %w", err)
        }

        err = c.log.InsertConfirmedCommitSet(&closeInfo.CommitSet)
        if err != nil {
                return fmt.Errorf("unable to write commit set: %w", err)
        }

        // After the set of resolutions is successfully logged, we can safely
        // close the channel. After this succeeds we won't be getting chain
        // events anymore, so we must make sure we can recover on restart after
        // it is marked closed. If the next state transition fails, we'll start
        // up in the prior state again and will no longer be getting chain
        // events. In this case we must manually re-trigger the state
        // transition into StateContractClosed based on the close status of the
        // channel.
        closeSummary := &closeInfo.ChannelCloseSummary
        err = c.cfg.MarkChannelClosed(
                closeSummary,
                channeldb.ChanStatusRemoteCloseInitiator,
        )
        if err != nil {
                return fmt.Errorf("unable to mark channel closed: %w", err)
        }

        // We'll now advance our state machine until it reaches a terminal
        // state.
        _, _, err = c.advanceState(
                uint32(closeInfo.SpendingHeight),
                remoteCloseTrigger, &closeInfo.CommitSet,
        )
        if err != nil {
                log.Errorf("Unable to advance state: %v", err)
        }

        return nil
}

// handleContractBreach takes a breach close event from ChainEvents, saves the
// contract resolutions to disk, marks the channel as closed, and advances the
// state.
func (c *ChannelArbitrator) handleContractBreach(
        breachInfo *BreachCloseInfo) error {

        closeSummary := &breachInfo.CloseSummary

        log.Infof("ChannelArbitrator(%v): remote party has breached channel "+
                "at height %v!", c.cfg.ChanPoint, closeSummary.CloseHeight)

        // In the breach case, we'll only have anchor and breach resolutions.
        contractRes := &ContractResolutions{
                CommitHash:       breachInfo.CommitHash,
                BreachResolution: breachInfo.BreachResolution,
                AnchorResolution: breachInfo.AnchorResolution,
        }

        // We'll transition to the ContractClosed state and log the set of
        // resolutions such that they can be turned into resolvers later on.
        // We'll also insert the CommitSet of the latest set of commitments.
        err := c.log.LogContractResolutions(contractRes)
        if err != nil {
                return fmt.Errorf("unable to write resolutions: %w", err)
        }

        err = c.log.InsertConfirmedCommitSet(&breachInfo.CommitSet)
        if err != nil {
                return fmt.Errorf("unable to write commit set: %w", err)
        }

        // The channel is finally marked pending closed here as the
        // BreachArbitrator and channel arbitrator have persisted the relevant
        // states.
        err = c.cfg.MarkChannelClosed(
                closeSummary, channeldb.ChanStatusRemoteCloseInitiator,
        )
        if err != nil {
                return fmt.Errorf("unable to mark channel closed: %w", err)
        }

        log.Infof("Breached channel=%v marked pending-closed",
                breachInfo.BreachResolution.FundingOutPoint)

        // We'll advance our state machine until it reaches a terminal state.
        _, _, err = c.advanceState(
                closeSummary.CloseHeight, breachCloseTrigger,
                &breachInfo.CommitSet,
        )
        if err != nil {
                log.Errorf("Unable to advance state: %v", err)
        }

        return nil
}