lightningnetwork / lnd / 12312390362

13 Dec 2024 08:44AM UTC coverage: 57.458% (+8.5%) from 48.92%

Pull #9343 (github) by ellemouton
fn: rework the ContextGuard and add tests

In this commit, the ContextGuard struct is reworked such that the
context provided by its new main WithCtx method is cancelled in sync
with the parent context being cancelled or with the guard's quit
channel being closed. Tests are added to assert the behaviour. In order
for the close of the quit channel to be consistent with the cancelling
of the derived contexts, the quit channel _must_ be kept internal to
the ContextGuard so that callers can only close it via the exposed Quit
method, which takes care to first cancel any derived contexts that
depend on the quit channel before returning.
Pull Request #9343: fn: expand the ContextGuard and add tests
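
A minimal Go sketch of the behaviour described above (illustrative only; it
does not reproduce the actual fn.ContextGuard API, and the helper names used
here are placeholders mirroring the commit message):

package main

import (
        "context"
        "fmt"
        "sync"
)

// contextGuard keeps its quit channel private so that the only way to close
// it is via Quit, which first cancels every derived context.
type contextGuard struct {
        mu      sync.Mutex
        once    sync.Once
        quit    chan struct{}
        cancels []context.CancelFunc
}

func newContextGuard() *contextGuard {
        return &contextGuard{quit: make(chan struct{})}
}

// WithCtx derives a context that is cancelled either when the parent context
// is cancelled or when Quit is called on the guard.
func (g *contextGuard) WithCtx(parent context.Context) (context.Context,
        context.CancelFunc) {

        ctx, cancel := context.WithCancel(parent)

        g.mu.Lock()
        g.cancels = append(g.cancels, cancel)
        g.mu.Unlock()

        // Bridge the private quit channel to the derived context.
        go func() {
                select {
                case <-g.quit:
                        cancel()
                case <-ctx.Done():
                }
        }()

        return ctx, cancel
}

// Quit cancels all derived contexts first and only then closes the quit
// channel, keeping the two cancellation signals consistent.
func (g *contextGuard) Quit() {
        g.once.Do(func() {
                g.mu.Lock()
                for _, cancel := range g.cancels {
                        cancel()
                }
                g.mu.Unlock()

                close(g.quit)
        })
}

func main() {
        g := newContextGuard()
        ctx, _ := g.WithCtx(context.Background())

        g.Quit()
        <-ctx.Done()
        fmt.Println("derived context cancelled:", ctx.Err())
}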

101853 of 177264 relevant lines covered (57.46%)

24972.93 hits per line

Source File

/contractcourt/channel_arbitrator.go: 75.21%
1
package contractcourt
2

3
import (
4
        "bytes"
5
        "context"
6
        "errors"
7
        "fmt"
8
        "math"
9
        "sync"
10
        "sync/atomic"
11
        "time"
12

13
        "github.com/btcsuite/btcd/btcutil"
14
        "github.com/btcsuite/btcd/chaincfg/chainhash"
15
        "github.com/btcsuite/btcd/txscript"
16
        "github.com/btcsuite/btcd/wire"
17
        "github.com/lightningnetwork/lnd/channeldb"
18
        "github.com/lightningnetwork/lnd/fn/v2"
19
        "github.com/lightningnetwork/lnd/graph/db/models"
20
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
21
        "github.com/lightningnetwork/lnd/input"
22
        "github.com/lightningnetwork/lnd/invoices"
23
        "github.com/lightningnetwork/lnd/kvdb"
24
        "github.com/lightningnetwork/lnd/labels"
25
        "github.com/lightningnetwork/lnd/lntypes"
26
        "github.com/lightningnetwork/lnd/lnutils"
27
        "github.com/lightningnetwork/lnd/lnwallet"
28
        "github.com/lightningnetwork/lnd/lnwire"
29
        "github.com/lightningnetwork/lnd/sweep"
30
)
31

32
var (
33
        // errAlreadyForceClosed is an error returned when we attempt to force
34
        // close a channel that's already in the process of doing so.
35
        errAlreadyForceClosed = errors.New("channel is already in the " +
36
                "process of being force closed")
37
)
38

39
const (
40
        // arbitratorBlockBufferSize is the size of the buffer we give to each
41
        // channel arbitrator.
42
        arbitratorBlockBufferSize = 20
43

44
        // AnchorOutputValue is the output value for the anchor output of an
45
        // anchor channel.
46
        // See BOLT 03 for more details:
47
        // https://github.com/lightning/bolts/blob/master/03-transactions.md
48
        AnchorOutputValue = btcutil.Amount(330)
49
)
50

51
// WitnessSubscription represents an intent to be notified once new witnesses
52
// are discovered by various active contract resolvers. A contract resolver may
53
// use this to be notified of when it can satisfy an incoming contract after we
54
// discover the witness for an outgoing contract.
55
type WitnessSubscription struct {
56
        // WitnessUpdates is a channel that newly discovered witnesses will be
57
        // sent over.
58
        //
59
        // TODO(roasbeef): couple with WitnessType?
60
        WitnessUpdates <-chan lntypes.Preimage
61

62
        // CancelSubscription is a function closure that should be used by a
63
        // client to cancel the subscription once they are no longer interested
64
        // in receiving new updates.
65
        CancelSubscription func()
66
}
67

68
// WitnessBeacon is a global beacon of witnesses. Contract resolvers will use
69
// this interface to lookup witnesses (preimages typically) of contracts
70
// they're trying to resolve, add new preimages they resolve, and finally
71
// receive new updates each new time a preimage is discovered.
72
//
73
// TODO(roasbeef): need to delete the pre-images once we've used them
74
// and have been sufficiently confirmed?
75
type WitnessBeacon interface {
76
        // SubscribeUpdates returns a channel that will be sent upon *each* time
77
        // a new preimage is discovered.
78
        SubscribeUpdates(chanID lnwire.ShortChannelID, htlc *channeldb.HTLC,
79
                payload *hop.Payload,
80
                nextHopOnionBlob []byte) (*WitnessSubscription, error)
81

82
        // LookupPreimage attempts to look up a preimage in the global cache.
83
        // True is returned for the second argument if the preimage is found.
84
        LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool)
85

86
        // AddPreimages adds a batch of newly discovered preimages to the global
87
        // cache, and also signals any subscribers of the newly discovered
88
        // witness.
89
        AddPreimages(preimages ...lntypes.Preimage) error
90
}
91

92
// ArbChannel is an abstraction that allows the channel arbitrator to interact
93
// with an open channel.
94
type ArbChannel interface {
95
        // ForceCloseChan should force close the contract that this attendant
96
        // is watching over. We'll use this when we decide that we need to go
97
        // to chain. It should in addition tell the switch to remove the
98
        // corresponding link, such that we won't accept any new updates. The
99
        // returned summary contains all items needed to eventually resolve all
100
        // outputs on chain.
101
        ForceCloseChan() (*wire.MsgTx, error)
102

103
        // NewAnchorResolutions returns the anchor resolutions for currently
104
        // valid commitment transactions.
105
        NewAnchorResolutions() (*lnwallet.AnchorResolutions, error)
106
}
107

108
// ChannelArbitratorConfig contains all the functionality that the
109
// ChannelArbitrator needs in order to properly arbitrate any contract dispute
110
// on chain.
111
type ChannelArbitratorConfig struct {
112
        // ChanPoint is the channel point that uniquely identifies this
113
        // channel.
114
        ChanPoint wire.OutPoint
115

116
        // Channel is the full channel data structure. For legacy channels, this
117
        // field may not always be set after a restart.
118
        Channel ArbChannel
119

120
        // ShortChanID describes the exact location of the channel within the
121
        // chain. We'll use this to address any messages that we need to send
122
        // to the switch during contract resolution.
123
        ShortChanID lnwire.ShortChannelID
124

125
        // ChainEvents is an active subscription to the chain watcher for this
126
        // channel to be notified of any on-chain activity related to this
127
        // channel.
128
        ChainEvents *ChainEventSubscription
129

130
        // MarkCommitmentBroadcasted should mark the channel as the commitment
131
        // being broadcast, and we are waiting for the commitment to confirm.
132
        MarkCommitmentBroadcasted func(*wire.MsgTx, lntypes.ChannelParty) error
133

134
        // MarkChannelClosed marks the channel closed in the database, with the
135
        // passed close summary. After this method successfully returns we can
136
        // no longer expect to receive chain events for this channel, and must
137
        // be able to recover from a failure without getting the close event
138
        // again. It takes an optional channel status which will update the
139
        // channel status in the record that we keep of historical channels.
140
        MarkChannelClosed func(*channeldb.ChannelCloseSummary,
141
                ...channeldb.ChannelStatus) error
142

143
        // IsPendingClose is a boolean indicating whether the channel is marked
144
        // as pending close in the database.
145
        IsPendingClose bool
146

147
        // ClosingHeight is the height at which the channel was closed. Note
148
        // that this value is only valid if IsPendingClose is true.
149
        ClosingHeight uint32
150

151
        // CloseType is the type of the close event in case IsPendingClose is
152
        // true. Otherwise this value is unset.
153
        CloseType channeldb.ClosureType
154

155
        // MarkChannelResolved is a function closure that serves to mark a
156
        // channel as "fully resolved". A channel itself can be considered
157
        // fully resolved once all active contracts have individually been
158
        // fully resolved.
159
        //
160
        // TODO(roasbeef): need RPC's to combine for pendingchannels RPC
161
        MarkChannelResolved func() error
162

163
        // PutResolverReport records a resolver report for the channel. If the
164
        // transaction provided is nil, the function should write the report
165
        // in a new transaction.
166
        PutResolverReport func(tx kvdb.RwTx,
167
                report *channeldb.ResolverReport) error
168

169
        // FetchHistoricalChannel retrieves the historical state of a channel.
170
        // This is mostly used to supplement the ContractResolvers with
171
        // additional information required for proper contract resolution.
172
        FetchHistoricalChannel func() (*channeldb.OpenChannel, error)
173

174
        // FindOutgoingHTLCDeadline returns the deadline in absolute block
175
        // height for the specified outgoing HTLC. For an outgoing HTLC, its
176
        // deadline is defined by the timeout height of its corresponding
177
        // incoming HTLC - this is the expiry height at which the remote peer can
178
        // spend his/her outgoing HTLC via the timeout path.
179
        FindOutgoingHTLCDeadline func(htlc channeldb.HTLC) fn.Option[int32]
180

181
        ChainArbitratorConfig
182
}
183

184
// ReportOutputType describes the type of output that is being reported
185
// on.
186
type ReportOutputType uint8
187

188
const (
189
        // ReportOutputIncomingHtlc is an incoming hash time locked contract on
190
        // the commitment tx.
191
        ReportOutputIncomingHtlc ReportOutputType = iota
192

193
        // ReportOutputOutgoingHtlc is an outgoing hash time locked contract on
194
        // the commitment tx.
195
        ReportOutputOutgoingHtlc
196

197
        // ReportOutputUnencumbered is an uncontested output on the commitment
198
        // transaction paying to us directly.
199
        ReportOutputUnencumbered
200

201
        // ReportOutputAnchor is an anchor output on the commitment tx.
202
        ReportOutputAnchor
203
)
204

205
// ContractReport provides a summary of a commitment tx output.
206
type ContractReport struct {
207
        // Outpoint is the final output that will be swept back to the wallet.
208
        Outpoint wire.OutPoint
209

210
        // Type indicates the type of the reported output.
211
        Type ReportOutputType
212

213
        // Amount is the final value that will be swept back to the wallet.
214
        Amount btcutil.Amount
215

216
        // MaturityHeight is the absolute block height that this output will
217
        // mature at.
218
        MaturityHeight uint32
219

220
        // Stage indicates whether the htlc is in the CLTV-timeout stage (1) or
221
        // the CSV-delay stage (2). A stage 1 htlc's maturity height will be set
222
        // to its expiry height, while a stage 2 htlc's maturity height will be
223
        // set to its confirmation height plus the maturity requirement.
224
        Stage uint32
225

226
        // LimboBalance is the total number of frozen coins within this
227
        // contract.
228
        LimboBalance btcutil.Amount
229

230
        // RecoveredBalance is the total value that has been successfully swept
231
        // back to the user's wallet.
232
        RecoveredBalance btcutil.Amount
233
}
234

235
// resolverReport creates a resolve report using some of the information in the
236
// contract report.
237
func (c *ContractReport) resolverReport(spendTx *chainhash.Hash,
238
        resolverType channeldb.ResolverType,
239
        outcome channeldb.ResolverOutcome) *channeldb.ResolverReport {
10✔
240

10✔
241
        return &channeldb.ResolverReport{
10✔
242
                OutPoint:        c.Outpoint,
10✔
243
                Amount:          c.Amount,
10✔
244
                ResolverType:    resolverType,
10✔
245
                ResolverOutcome: outcome,
10✔
246
                SpendTxID:       spendTx,
10✔
247
        }
10✔
248
}
10✔
249

250
// htlcSet represents the set of active HTLCs on a given commitment
251
// transaction.
252
type htlcSet struct {
253
        // incomingHTLCs is a map of all incoming HTLCs on the target
254
        // commitment transaction. We may potentially go onchain to claim the
255
        // funds sent to us within this set.
256
        incomingHTLCs map[uint64]channeldb.HTLC
257

258
        // outgoingHTLCs is a map of all outgoing HTLCs on the target
259
        // commitment transaction. We may potentially go onchain to reclaim the
260
        // funds that are currently in limbo.
261
        outgoingHTLCs map[uint64]channeldb.HTLC
262
}
263

264
// newHtlcSet constructs a new HTLC set from a slice of HTLC's.
265
func newHtlcSet(htlcs []channeldb.HTLC) htlcSet {
46✔
266
        outHTLCs := make(map[uint64]channeldb.HTLC)
46✔
267
        inHTLCs := make(map[uint64]channeldb.HTLC)
46✔
268
        for _, htlc := range htlcs {
77✔
269
                if htlc.Incoming {
37✔
270
                        inHTLCs[htlc.HtlcIndex] = htlc
6✔
271
                        continue
6✔
272
                }
273

274
                outHTLCs[htlc.HtlcIndex] = htlc
25✔
275
        }
276

277
        return htlcSet{
46✔
278
                incomingHTLCs: inHTLCs,
46✔
279
                outgoingHTLCs: outHTLCs,
46✔
280
        }
46✔
281
}
282

283
// HtlcSetKey is a two-tuple that uniquely identifies a set of HTLCs on a
284
// commitment transaction.
285
type HtlcSetKey struct {
286
        // IsRemote denotes if the HTLCs are on the remote commitment
287
        // transaction.
288
        IsRemote bool
289

290
        // IsPending denotes if the commitment transaction that the HTLCs are on
291
        // is pending (the higher of two unrevoked commitments).
292
        IsPending bool
293
}
294

295
var (
296
        // LocalHtlcSet is the HtlcSetKey used for local commitments.
297
        LocalHtlcSet = HtlcSetKey{IsRemote: false, IsPending: false}
298

299
        // RemoteHtlcSet is the HtlcSetKey used for remote commitments.
300
        RemoteHtlcSet = HtlcSetKey{IsRemote: true, IsPending: false}
301

302
        // RemotePendingHtlcSet is the HtlcSetKey used for dangling remote
303
        // commitment transactions.
304
        RemotePendingHtlcSet = HtlcSetKey{IsRemote: true, IsPending: true}
305
)
306

307
// String returns a human readable string describing the target HtlcSetKey.
308
func (h HtlcSetKey) String() string {
8✔
309
        switch h {
8✔
310
        case LocalHtlcSet:
4✔
311
                return "LocalHtlcSet"
4✔
312
        case RemoteHtlcSet:
2✔
313
                return "RemoteHtlcSet"
2✔
314
        case RemotePendingHtlcSet:
2✔
315
                return "RemotePendingHtlcSet"
2✔
316
        default:
×
317
                return "unknown HtlcSetKey"
×
318
        }
319
}
320

321
// ChannelArbitrator is the on-chain arbitrator for a particular channel. The
322
// struct will keep in sync with the current set of HTLCs on the commitment
323
// transaction. The job of the attendant is to go on-chain to either settle or
324
// cancel an HTLC as necessary iff: an HTLC times out, or we know the
325
// pre-image to an HTLC, but it wasn't settled by the link off-chain. The
326
// ChannelArbitrator will factor in an expected confirmation delta when
327
// broadcasting to ensure that we avoid any possibility of race conditions, and
328
// sweep the output(s) without contest.
329
type ChannelArbitrator struct {
330
        started int32 // To be used atomically.
331
        stopped int32 // To be used atomically.
332

333
        // startTimestamp is the time when this ChannelArbitrator was started.
334
        startTimestamp time.Time
335

336
        // log is a persistent log that the attendant will use to checkpoint
337
        // its next action, and the state of any unresolved contracts.
338
        log ArbitratorLog
339

340
        // activeHTLCs is the set of active incoming/outgoing HTLC's on all
341
        // currently valid commitment transactions.
342
        activeHTLCs map[HtlcSetKey]htlcSet
343

344
        // unmergedSet is used to update the activeHTLCs map in two callsites:
345
        // checkLocalChainActions and sweepAnchors. It contains the latest
346
        // updates from the link. It is not deleted from; its entries may be
347
        // replaced on subsequent calls to notifyContractUpdate.
348
        unmergedSet map[HtlcSetKey]htlcSet
349
        unmergedMtx sync.RWMutex
350

351
        // cfg contains all the functionality that the ChannelArbitrator requires
352
        // to do its duty.
353
        cfg ChannelArbitratorConfig
354

355
        // blocks is a channel that the arbitrator will receive new blocks on.
356
        // This channel should be buffered so that it does not block the
357
        // sender.
358
        blocks chan int32
359

360
        // signalUpdates is a channel that any new live signals for the channel
361
        // we're watching over will be sent.
362
        signalUpdates chan *signalUpdateMsg
363

364
        // activeResolvers is a slice of any active resolvers. This is used to
365
        // be able to signal them for shutdown in the case that we shutdown.
366
        activeResolvers []ContractResolver
367

368
        // activeResolversLock prevents simultaneous read and write to the
369
        // resolvers slice.
370
        activeResolversLock sync.RWMutex
371

372
        // resolutionSignal is a channel that will be sent upon by contract
373
        // resolvers once their contract has been fully resolved. With each
374
        // send, we'll check to see if the contract is fully resolved.
375
        resolutionSignal chan struct{}
376

377
        // forceCloseReqs is a channel that requests to forcibly close the
378
        // contract will be sent over.
379
        forceCloseReqs chan *forceCloseReq
380

381
        // state is the current state of the arbitrator. This state is examined
382
        // upon start up to decide which actions to take.
383
        state ArbitratorState
384

385
        wg   sync.WaitGroup
386
        quit chan struct{}
387
}
388

389
// NewChannelArbitrator returns a new instance of a ChannelArbitrator backed by
390
// the passed config struct.
391
func NewChannelArbitrator(cfg ChannelArbitratorConfig,
392
        htlcSets map[HtlcSetKey]htlcSet, log ArbitratorLog) *ChannelArbitrator {
51✔
393

51✔
394
        // Create a new map for unmerged HTLC's as we will overwrite the values
51✔
395
        // and want to avoid modifying activeHTLCs directly. This soft copying
51✔
396
        // is done to ensure that activeHTLCs isn't reset as an empty map later
51✔
397
        // on.
51✔
398
        unmerged := make(map[HtlcSetKey]htlcSet)
51✔
399
        unmerged[LocalHtlcSet] = htlcSets[LocalHtlcSet]
51✔
400
        unmerged[RemoteHtlcSet] = htlcSets[RemoteHtlcSet]
51✔
401

51✔
402
        // If the pending set exists, write that as well.
51✔
403
        if _, ok := htlcSets[RemotePendingHtlcSet]; ok {
51✔
404
                unmerged[RemotePendingHtlcSet] = htlcSets[RemotePendingHtlcSet]
×
405
        }
×
406

407
        return &ChannelArbitrator{
51✔
408
                log:              log,
51✔
409
                blocks:           make(chan int32, arbitratorBlockBufferSize),
51✔
410
                signalUpdates:    make(chan *signalUpdateMsg),
51✔
411
                resolutionSignal: make(chan struct{}),
51✔
412
                forceCloseReqs:   make(chan *forceCloseReq),
51✔
413
                activeHTLCs:      htlcSets,
51✔
414
                unmergedSet:      unmerged,
51✔
415
                cfg:              cfg,
51✔
416
                quit:             make(chan struct{}),
51✔
417
        }
51✔
418
}
419

420
// chanArbStartState contains the information from disk that we need to start
421
// up a channel arbitrator.
422
type chanArbStartState struct {
423
        currentState ArbitratorState
424
        commitSet    *CommitSet
425
}
426

427
// getStartState retrieves the information from disk that our channel arbitrator
428
// requires to start.
429
func (c *ChannelArbitrator) getStartState(tx kvdb.RTx) (*chanArbStartState,
430
        error) {
48✔
431

48✔
432
        // First, we'll read our last state from disk, so our internal state
48✔
433
        // machine can act accordingly.
48✔
434
        state, err := c.log.CurrentState(tx)
48✔
435
        if err != nil {
48✔
436
                return nil, err
×
437
        }
×
438

439
        // Next we'll fetch our confirmed commitment set. This will only exist
440
        // if the channel has been closed out on chain for modern nodes. For
441
        // older nodes, this won't be found at all, and will rely on the
442
        // existing written chain actions. Additionally, if this channel hasn't
443
        // logged any actions in the log, then this field won't be present.
444
        commitSet, err := c.log.FetchConfirmedCommitSet(tx)
48✔
445
        if err != nil && err != errNoCommitSet && err != errScopeBucketNoExist {
48✔
446
                return nil, err
×
447
        }
×
448

449
        return &chanArbStartState{
48✔
450
                currentState: state,
48✔
451
                commitSet:    commitSet,
48✔
452
        }, nil
48✔
453
}
454

455
// Start starts all the goroutines that the ChannelArbitrator needs to operate.
456
// It takes a start state, which will be looked up on disk if it is not
457
// provided.
458
func (c *ChannelArbitrator) Start(state *chanArbStartState) error {
48✔
459
        if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
48✔
460
                return nil
×
461
        }
×
462
        c.startTimestamp = c.cfg.Clock.Now()
48✔
463

48✔
464
        // If the state passed in is nil, we look it up now.
48✔
465
        if state == nil {
85✔
466
                var err error
37✔
467
                state, err = c.getStartState(nil)
37✔
468
                if err != nil {
37✔
469
                        return err
×
470
                }
×
471
        }
472

473
        log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
48✔
474
                c.cfg.ChanPoint, lnutils.SpewLogClosure(c.activeHTLCs),
48✔
475
                state.currentState)
48✔
476

48✔
477
        // Set our state from our starting state.
48✔
478
        c.state = state.currentState
48✔
479

48✔
480
        _, bestHeight, err := c.cfg.ChainIO.GetBestBlock()
48✔
481
        if err != nil {
48✔
482
                return err
×
483
        }
×
484

485
        c.wg.Add(1)
48✔
486
        go c.channelAttendant(bestHeight, state.commitSet)
48✔
487

48✔
488
        return nil
48✔
489
}
490

491
// progressStateMachineAfterRestart attempts to progress the state machine
492
// after a restart. This makes sure that if the state transition failed, we
493
// will try to progress the state machine again. Moreover it will relaunch
494
// resolvers if the channel is still in the pending close state and has not
495
// been fully resolved yet.
496
func (c *ChannelArbitrator) progressStateMachineAfterRestart(bestHeight int32,
497
        commitSet *CommitSet) error {
48✔
498

48✔
499
        // If the channel has been marked pending close in the database, and we
48✔
500
        // haven't transitioned the state machine to StateContractClosed (or a
48✔
501
        // succeeding state), then a state transition most likely failed. We'll
48✔
502
        // try to recover from this by manually advancing the state by setting
48✔
503
        // the corresponding close trigger.
48✔
504
        trigger := chainTrigger
48✔
505
        triggerHeight := uint32(bestHeight)
48✔
506
        if c.cfg.IsPendingClose {
53✔
507
                switch c.state {
5✔
508
                case StateDefault:
4✔
509
                        fallthrough
4✔
510
                case StateBroadcastCommit:
5✔
511
                        fallthrough
5✔
512
                case StateCommitmentBroadcasted:
5✔
513
                        switch c.cfg.CloseType {
5✔
514

515
                        case channeldb.CooperativeClose:
1✔
516
                                trigger = coopCloseTrigger
1✔
517

518
                        case channeldb.BreachClose:
1✔
519
                                trigger = breachCloseTrigger
1✔
520

521
                        case channeldb.LocalForceClose:
1✔
522
                                trigger = localCloseTrigger
1✔
523

524
                        case channeldb.RemoteForceClose:
2✔
525
                                trigger = remoteCloseTrigger
2✔
526
                        }
527

528
                        log.Warnf("ChannelArbitrator(%v): detected stalled "+
5✔
529
                                "state=%v for closed channel",
5✔
530
                                c.cfg.ChanPoint, c.state)
5✔
531
                }
532

533
                triggerHeight = c.cfg.ClosingHeight
5✔
534
        }
535

536
        log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+
48✔
537
                "triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger,
48✔
538
                triggerHeight)
48✔
539

48✔
540
        // We'll now attempt to advance our state forward based on the current
48✔
541
        // on-chain state, and our set of active contracts.
48✔
542
        startingState := c.state
48✔
543
        nextState, _, err := c.advanceState(
48✔
544
                triggerHeight, trigger, commitSet,
48✔
545
        )
48✔
546
        if err != nil {
50✔
547
                switch err {
2✔
548

549
                // If we detect that we tried to fetch resolutions, but failed,
550
                // this channel was marked closed in the database before
551
                // resolutions were successfully written. In this case there is not
552
                // much we can do, so we don't return the error.
553
                case errScopeBucketNoExist:
×
554
                        fallthrough
×
555
                case errNoResolutions:
1✔
556
                        log.Warnf("ChannelArbitrator(%v): detected closed"+
1✔
557
                                "channel with no contract resolutions written.",
1✔
558
                                c.cfg.ChanPoint)
1✔
559

560
                default:
1✔
561
                        return err
1✔
562
                }
563
        }
564

565
        // If we started and ended at the awaiting full resolution state, then
566
        // we'll relaunch our set of unresolved contracts.
567
        if startingState == StateWaitingFullResolution &&
47✔
568
                nextState == StateWaitingFullResolution {
48✔
569

1✔
570
                // In order to relaunch the resolvers, we'll need to fetch the
1✔
571
                // set of HTLCs that were present in the commitment transaction
1✔
572
                // at the time it was confirmed. commitSet.ConfCommitKey can't
1✔
573
                // be nil at this point since we're in
1✔
574
                // StateWaitingFullResolution. We can only be in
1✔
575
                // StateWaitingFullResolution after we've transitioned from
1✔
576
                // StateContractClosed which can only be triggered by the local
1✔
577
                // or remote close trigger. This trigger is only fired when we
1✔
578
                // receive a chain event from the chain watcher that the
1✔
579
                // commitment has been confirmed on chain, and before we
1✔
580
                // advance our state step, we call InsertConfirmedCommitSet.
1✔
581
                err := c.relaunchResolvers(commitSet, triggerHeight)
1✔
582
                if err != nil {
1✔
583
                        return err
×
584
                }
×
585
        }
586

587
        return nil
47✔
588
}
589

590
// maybeAugmentTaprootResolvers will update the contract resolution information
591
// for taproot channels. This ensures that all the resolvers have the latest
592
// resolution, which may also include data such as the control block and tap
593
// tweaks.
594
func maybeAugmentTaprootResolvers(chanType channeldb.ChannelType,
595
        resolver ContractResolver,
596
        contractResolutions *ContractResolutions) {
1✔
597

1✔
598
        if !chanType.IsTaproot() {
2✔
599
                return
1✔
600
        }
1✔
601

602
        // The on disk resolutions contains all the ctrl block
603
        // information, so we'll set that now for the relevant
604
        // resolvers.
605
        switch r := resolver.(type) {
×
606
        case *commitSweepResolver:
×
607
                if contractResolutions.CommitResolution != nil {
×
608
                        //nolint:ll
×
609
                        r.commitResolution = *contractResolutions.CommitResolution
×
610
                }
×
611
        case *htlcOutgoingContestResolver:
×
612
                //nolint:ll
×
613
                htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
×
614
                for _, htlcRes := range htlcResolutions {
×
615
                        htlcRes := htlcRes
×
616

×
617
                        if r.htlcResolution.ClaimOutpoint ==
×
618
                                htlcRes.ClaimOutpoint {
×
619

×
620
                                r.htlcResolution = htlcRes
×
621
                        }
×
622
                }
623

624
        case *htlcTimeoutResolver:
×
625
                //nolint:ll
×
626
                htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
×
627
                for _, htlcRes := range htlcResolutions {
×
628
                        htlcRes := htlcRes
×
629

×
630
                        if r.htlcResolution.ClaimOutpoint ==
×
631
                                htlcRes.ClaimOutpoint {
×
632

×
633
                                r.htlcResolution = htlcRes
×
634
                        }
×
635
                }
636

637
        case *htlcIncomingContestResolver:
×
638
                //nolint:ll
×
639
                htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
×
640
                for _, htlcRes := range htlcResolutions {
×
641
                        htlcRes := htlcRes
×
642

×
643
                        if r.htlcResolution.ClaimOutpoint ==
×
644
                                htlcRes.ClaimOutpoint {
×
645

×
646
                                r.htlcResolution = htlcRes
×
647
                        }
×
648
                }
649
        case *htlcSuccessResolver:
×
650
                //nolint:ll
×
651
                htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
×
652
                for _, htlcRes := range htlcResolutions {
×
653
                        htlcRes := htlcRes
×
654

×
655
                        if r.htlcResolution.ClaimOutpoint ==
×
656
                                htlcRes.ClaimOutpoint {
×
657

×
658
                                r.htlcResolution = htlcRes
×
659
                        }
×
660
                }
661
        }
662
}
663

664
// relaunchResolvers relaunches the set of resolvers for unresolved contracts in
665
// order to provide them with information that's not immediately available upon
666
// starting the ChannelArbitrator. This information should ideally be stored in
667
// the database, so this only serves as an intermediate work-around to prevent a
668
// migration.
669
func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
670
        heightHint uint32) error {
1✔
671

1✔
672
        // We'll now query our log to see if there are any active unresolved
1✔
673
        // contracts. If this is the case, then we'll relaunch all contract
1✔
674
        // resolvers.
1✔
675
        unresolvedContracts, err := c.log.FetchUnresolvedContracts()
1✔
676
        if err != nil {
1✔
677
                return err
×
678
        }
×
679

680
        // Retrieve the commitment tx hash from the log.
681
        contractResolutions, err := c.log.FetchContractResolutions()
1✔
682
        if err != nil {
1✔
683
                log.Errorf("unable to fetch contract resolutions: %v",
×
684
                        err)
×
685
                return err
×
686
        }
×
687
        commitHash := contractResolutions.CommitHash
1✔
688

1✔
689
        // In prior versions of lnd, the information needed to supplement the
1✔
690
        // resolvers (in most cases, the full amount of the HTLC) was found in
1✔
691
        // the chain action map, which is now deprecated.  As a result, if the
1✔
692
        // commitSet is nil (an older node with unresolved HTLCs at time of
1✔
693
        // upgrade), then we'll use the chain action information in place. The
1✔
694
        // chain actions may exclude some information, but we cannot recover it
1✔
695
        // for these older nodes at the moment.
1✔
696
        var confirmedHTLCs []channeldb.HTLC
1✔
697
        if commitSet != nil && commitSet.ConfCommitKey.IsSome() {
2✔
698
                confCommitKey, err := commitSet.ConfCommitKey.UnwrapOrErr(
1✔
699
                        fmt.Errorf("no commitKey available"),
1✔
700
                )
1✔
701
                if err != nil {
1✔
702
                        return err
×
703
                }
×
704
                confirmedHTLCs = commitSet.HtlcSets[confCommitKey]
1✔
705
        } else {
×
706
                chainActions, err := c.log.FetchChainActions()
×
707
                if err != nil {
×
708
                        log.Errorf("unable to fetch chain actions: %v", err)
×
709
                        return err
×
710
                }
×
711
                for _, htlcs := range chainActions {
×
712
                        confirmedHTLCs = append(confirmedHTLCs, htlcs...)
×
713
                }
×
714
        }
715

716
        // Reconstruct the htlc outpoints and data from the chain action log.
717
        // The purpose of the constructed htlc map is to supplement the
718
        // resolvers restored from database with extra data. Ideally this data
719
        // is stored as part of the resolver in the log. This is a workaround
720
        // to prevent a db migration. We use all available htlc sets here in
721
        // order to ensure we have complete coverage.
722
        htlcMap := make(map[wire.OutPoint]*channeldb.HTLC)
1✔
723
        for _, htlc := range confirmedHTLCs {
4✔
724
                htlc := htlc
3✔
725
                outpoint := wire.OutPoint{
3✔
726
                        Hash:  commitHash,
3✔
727
                        Index: uint32(htlc.OutputIndex),
3✔
728
                }
3✔
729
                htlcMap[outpoint] = &htlc
3✔
730
        }
3✔
731

732
        // We'll also fetch the historical state of this channel, as it should
733
        // have been marked as closed by now, and supplement it to each resolver
734
        // such that we can properly resolve our pending contracts.
735
        var chanState *channeldb.OpenChannel
1✔
736
        chanState, err = c.cfg.FetchHistoricalChannel()
1✔
737
        switch {
1✔
738
        // If we don't find this channel, then it may be the case that it
739
        // was closed before we started to retain the final state
740
        // information for open channels.
741
        case err == channeldb.ErrNoHistoricalBucket:
×
742
                fallthrough
×
743
        case err == channeldb.ErrChannelNotFound:
×
744
                log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
×
745
                        "state", c.cfg.ChanPoint)
×
746

747
        case err != nil:
×
748
                return err
×
749
        }
750

751
        log.Infof("ChannelArbitrator(%v): relaunching %v contract "+
1✔
752
                "resolvers", c.cfg.ChanPoint, len(unresolvedContracts))
1✔
753

1✔
754
        for i := range unresolvedContracts {
2✔
755
                resolver := unresolvedContracts[i]
1✔
756

1✔
757
                if chanState != nil {
2✔
758
                        resolver.SupplementState(chanState)
1✔
759

1✔
760
                        // For taproot channels, we'll need to also make sure
1✔
761
                        // the control block information was set properly.
1✔
762
                        maybeAugmentTaprootResolvers(
1✔
763
                                chanState.ChanType, resolver,
1✔
764
                                contractResolutions,
1✔
765
                        )
1✔
766
                }
1✔
767

768
                unresolvedContracts[i] = resolver
1✔
769

1✔
770
                htlcResolver, ok := resolver.(htlcContractResolver)
1✔
771
                if !ok {
1✔
772
                        continue
×
773
                }
774

775
                htlcPoint := htlcResolver.HtlcPoint()
1✔
776
                htlc, ok := htlcMap[htlcPoint]
1✔
777
                if !ok {
1✔
778
                        return fmt.Errorf(
×
779
                                "htlc resolver %T unavailable", resolver,
×
780
                        )
×
781
                }
×
782

783
                htlcResolver.Supplement(*htlc)
1✔
784

1✔
785
                // If this is an outgoing HTLC, we will also need to supplement
1✔
786
                // the resolver with the expiry block height of its
1✔
787
                // corresponding incoming HTLC.
1✔
788
                if !htlc.Incoming {
2✔
789
                        deadline := c.cfg.FindOutgoingHTLCDeadline(*htlc)
1✔
790
                        htlcResolver.SupplementDeadline(deadline)
1✔
791
                }
1✔
792
        }
793

794
        // The anchor resolver is stateless and can always be re-instantiated.
795
        if contractResolutions.AnchorResolution != nil {
1✔
796
                anchorResolver := newAnchorResolver(
×
797
                        contractResolutions.AnchorResolution.AnchorSignDescriptor,
×
798
                        contractResolutions.AnchorResolution.CommitAnchor,
×
799
                        heightHint, c.cfg.ChanPoint,
×
800
                        ResolverConfig{
×
801
                                ChannelArbitratorConfig: c.cfg,
×
802
                        },
×
803
                )
×
804

×
805
                anchorResolver.SupplementState(chanState)
×
806

×
807
                unresolvedContracts = append(unresolvedContracts, anchorResolver)
×
808

×
809
                // TODO(roasbeef): this isn't re-launched?
×
810
        }
×
811

812
        c.launchResolvers(unresolvedContracts, true)
1✔
813

1✔
814
        return nil
1✔
815
}
816

817
// Report returns htlc reports for the active resolvers.
818
func (c *ChannelArbitrator) Report() []*ContractReport {
×
819
        c.activeResolversLock.RLock()
×
820
        defer c.activeResolversLock.RUnlock()
×
821

×
822
        var reports []*ContractReport
×
823
        for _, resolver := range c.activeResolvers {
×
824
                r, ok := resolver.(reportingContractResolver)
×
825
                if !ok {
×
826
                        continue
×
827
                }
828

829
                report := r.report()
×
830
                if report == nil {
×
831
                        continue
×
832
                }
833

834
                reports = append(reports, report)
×
835
        }
836

837
        return reports
×
838
}
839

840
// Stop signals the ChannelArbitrator for a graceful shutdown.
841
func (c *ChannelArbitrator) Stop() error {
51✔
842
        if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
57✔
843
                return nil
6✔
844
        }
6✔
845

846
        log.Debugf("Stopping ChannelArbitrator(%v)", c.cfg.ChanPoint)
45✔
847

45✔
848
        if c.cfg.ChainEvents.Cancel != nil {
56✔
849
                go c.cfg.ChainEvents.Cancel()
11✔
850
        }
11✔
851

852
        c.activeResolversLock.RLock()
45✔
853
        for _, activeResolver := range c.activeResolvers {
51✔
854
                activeResolver.Stop()
6✔
855
        }
6✔
856
        c.activeResolversLock.RUnlock()
45✔
857

45✔
858
        close(c.quit)
45✔
859
        c.wg.Wait()
45✔
860

45✔
861
        return nil
45✔
862
}
863

864
// transitionTrigger is an enum that denotes exactly *why* a state transition
865
// was initiated. This is useful as depending on the initial trigger, we may
866
// skip certain states as those actions are expected to have already taken
867
// place as a result of the external trigger.
868
type transitionTrigger uint8
869

870
const (
871
        // chainTrigger is a transition trigger that has been attempted due to
872
        // changing on-chain conditions such as a block which times out HTLC's
873
        // being attached.
874
        chainTrigger transitionTrigger = iota
875

876
        // userTrigger is a transition trigger driven by user action. Examples
877
        // of such a trigger include a user requesting a force closure of the
878
        // channel.
879
        userTrigger
880

881
        // remoteCloseTrigger is a transition trigger driven by the remote
882
        // peer's commitment being confirmed.
883
        remoteCloseTrigger
884

885
        // localCloseTrigger is a transition trigger driven by our commitment
886
        // being confirmed.
887
        localCloseTrigger
888

889
        // coopCloseTrigger is a transition trigger driven by a cooperative
890
        // close transaction being confirmed.
891
        coopCloseTrigger
892

893
        // breachCloseTrigger is a transition trigger driven by a remote breach
894
        // being confirmed. In this case the channel arbitrator will wait for
895
        // the BreachArbitrator to finish and then clean up gracefully.
896
        breachCloseTrigger
897
)
898

899
// String returns a human readable string describing the passed
900
// transitionTrigger.
901
func (t transitionTrigger) String() string {
×
902
        switch t {
×
903
        case chainTrigger:
×
904
                return "chainTrigger"
×
905

906
        case remoteCloseTrigger:
×
907
                return "remoteCloseTrigger"
×
908

909
        case userTrigger:
×
910
                return "userTrigger"
×
911

912
        case localCloseTrigger:
×
913
                return "localCloseTrigger"
×
914

915
        case coopCloseTrigger:
×
916
                return "coopCloseTrigger"
×
917

918
        case breachCloseTrigger:
×
919
                return "breachCloseTrigger"
×
920

921
        default:
×
922
                return "unknown trigger"
×
923
        }
924
}
925

926
// stateStep is a helper method that examines our internal state, and attempts
927
// the appropriate state transition if necessary. The next state we transition
928
// to is returned. Additionally, if the next transition results in a commitment
929
// broadcast, the commitment transaction itself is returned.
930
func (c *ChannelArbitrator) stateStep(
931
        triggerHeight uint32, trigger transitionTrigger,
932
        confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
175✔
933

175✔
934
        var (
175✔
935
                nextState ArbitratorState
175✔
936
                closeTx   *wire.MsgTx
175✔
937
        )
175✔
938
        switch c.state {
175✔
939

940
        // If we're in the default state, then we'll check our set of actions
941
        // to see if while we were down, conditions have changed.
942
        case StateDefault:
64✔
943
                log.Debugf("ChannelArbitrator(%v): new block (height=%v) "+
64✔
944
                        "examining active HTLC's", c.cfg.ChanPoint,
64✔
945
                        triggerHeight)
64✔
946

64✔
947
                // As a new block has been connected to the end of the main
64✔
948
                // chain, we'll check to see if we need to make any on-chain
64✔
949
                // claims on behalf of the channel contract that we're
64✔
950
                // arbitrating for. If a commitment has confirmed, then we'll
64✔
951
                // use the set snapshot from the chain, otherwise we'll use our
64✔
952
                // current set.
64✔
953
                var (
64✔
954
                        chainActions ChainActionMap
64✔
955
                        err          error
64✔
956
                )
64✔
957

64✔
958
                // Normally if we force close the channel locally we will have
64✔
959
                // no confCommitSet. However when the remote commitment confirms
64✔
960
                // without us ever broadcasting our local commitment we need to
64✔
961
                // make sure we cancel all upstream HTLCs for outgoing dust
64✔
962
                // HTLCs as well hence we need to fetch the chain actions here
64✔
963
                // as well.
64✔
964
                if confCommitSet == nil {
119✔
965
                        // Update the set of activeHTLCs so
55✔
966
                        // checkLocalChainActions has an up-to-date view of the
55✔
967
                        // commitments.
55✔
968
                        c.updateActiveHTLCs()
55✔
969
                        htlcs := c.activeHTLCs
55✔
970
                        chainActions, err = c.checkLocalChainActions(
55✔
971
                                triggerHeight, trigger, htlcs, false,
55✔
972
                        )
55✔
973
                        if err != nil {
55✔
974
                                return StateDefault, nil, err
×
975
                        }
×
976
                } else {
9✔
977
                        chainActions, err = c.constructChainActions(
9✔
978
                                confCommitSet, triggerHeight, trigger,
9✔
979
                        )
9✔
980
                        if err != nil {
9✔
981
                                return StateDefault, nil, err
×
982
                        }
×
983
                }
984

985
                // If there are no actions to be made, then we'll remain in the
986
                // default state. If this isn't a self initiated event (we're
987
                // checking due to a chain update), then we'll exit now.
988
                if len(chainActions) == 0 && trigger == chainTrigger {
101✔
989
                        log.Debugf("ChannelArbitrator(%v): no actions for "+
37✔
990
                                "chain trigger, terminating", c.cfg.ChanPoint)
37✔
991

37✔
992
                        return StateDefault, closeTx, nil
37✔
993
                }
37✔
994

995
                // Otherwise, we'll log that we checked the HTLC actions as the
996
                // commitment transaction has already been broadcast.
997
                log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v",
27✔
998
                        c.cfg.ChanPoint, lnutils.SpewLogClosure(chainActions))
27✔
999

27✔
1000
                // Cancel upstream HTLCs for all outgoing dust HTLCs available
27✔
1001
                // either on the local or the remote/remote pending commitment
27✔
1002
                // transaction.
27✔
1003
                dustHTLCs := chainActions[HtlcFailDustAction]
27✔
1004
                if len(dustHTLCs) > 0 {
28✔
1005
                        log.Debugf("ChannelArbitrator(%v): canceling %v dust "+
1✔
1006
                                "HTLCs backwards", c.cfg.ChanPoint,
1✔
1007
                                len(dustHTLCs))
1✔
1008

1✔
1009
                        getIdx := func(htlc channeldb.HTLC) uint64 {
2✔
1010
                                return htlc.HtlcIndex
1✔
1011
                        }
1✔
1012
                        dustHTLCSet := fn.NewSet(fn.Map(dustHTLCs, getIdx)...)
1✔
1013
                        err = c.abandonForwards(dustHTLCSet)
1✔
1014
                        if err != nil {
1✔
1015
                                return StateError, closeTx, err
×
1016
                        }
×
1017
                }
1018

1019
                // Depending on the type of trigger, we'll either "tunnel"
1020
                // through to a farther state, or just proceed linearly to the
1021
                // next state.
1022
                switch trigger {
27✔
1023

1024
                // If this is a chain trigger, then we'll go straight to the
1025
                // next state, as we still need to broadcast the commitment
1026
                // transaction.
1027
                case chainTrigger:
5✔
1028
                        fallthrough
5✔
1029
                case userTrigger:
15✔
1030
                        nextState = StateBroadcastCommit
15✔
1031

1032
                // If the trigger is a cooperative close being confirmed, then
1033
                // we can go straight to StateFullyResolved, as there won't be
1034
                // any contracts to resolve.
1035
                case coopCloseTrigger:
3✔
1036
                        nextState = StateFullyResolved
3✔
1037

1038
                // Otherwise, if this state advance was triggered by a
1039
                // commitment being confirmed on chain, then we'll jump
1040
                // straight to the state where the contract has already been
1041
                // closed, and we will inspect the set of unresolved contracts.
1042
                case localCloseTrigger:
2✔
1043
                        log.Errorf("ChannelArbitrator(%v): unexpected local "+
2✔
1044
                                "commitment confirmed while in StateDefault",
2✔
1045
                                c.cfg.ChanPoint)
2✔
1046
                        fallthrough
2✔
1047
                case remoteCloseTrigger:
8✔
1048
                        nextState = StateContractClosed
8✔
1049

1050
                case breachCloseTrigger:
1✔
1051
                        nextContractState, err := c.checkLegacyBreach()
1✔
1052
                        if nextContractState == StateError {
1✔
1053
                                return nextContractState, nil, err
×
1054
                        }
×
1055

1056
                        nextState = nextContractState
1✔
1057
                }
1058

1059
        // If we're in this state, then we've decided to broadcast the
1060
        // commitment transaction. We enter this state either due to an outside
1061
        // sub-system, or because an on-chain action has been triggered.
1062
        case StateBroadcastCommit:
20✔
1063
                // Under normal operation, we can only enter
20✔
1064
                // StateBroadcastCommit via a user or chain trigger. On restart,
20✔
1065
                // this state may be reexecuted after closing the channel, but
20✔
1066
                // failing to commit to StateContractClosed or
20✔
1067
                // StateFullyResolved. In that case, one of the four close
20✔
1068
                // triggers will be presented, signifying that we should skip
20✔
1069
                // rebroadcasting, and go straight to resolving the on-chain
20✔
1070
                // contract or marking the channel resolved.
20✔
1071
                switch trigger {
20✔
1072
                case localCloseTrigger, remoteCloseTrigger:
×
1073
                        log.Infof("ChannelArbitrator(%v): detected %s "+
×
1074
                                "close after closing channel, fast-forwarding "+
×
1075
                                "to %s to resolve contract",
×
1076
                                c.cfg.ChanPoint, trigger, StateContractClosed)
×
1077
                        return StateContractClosed, closeTx, nil
×
1078

1079
                case breachCloseTrigger:
1✔
1080
                        nextContractState, err := c.checkLegacyBreach()
1✔
1081
                        if nextContractState == StateError {
1✔
1082
                                log.Infof("ChannelArbitrator(%v): unable to "+
×
1083
                                        "advance breach close resolution: %v",
×
1084
                                        c.cfg.ChanPoint, nextContractState)
×
1085
                                return StateError, closeTx, err
×
1086
                        }
×
1087

1088
                        log.Infof("ChannelArbitrator(%v): detected %s close "+
1✔
1089
                                "after closing channel, fast-forwarding to %s"+
1✔
1090
                                " to resolve contract", c.cfg.ChanPoint,
1✔
1091
                                trigger, nextContractState)
1✔
1092

1✔
1093
                        return nextContractState, closeTx, nil
1✔
1094

1095
                case coopCloseTrigger:
×
1096
                        log.Infof("ChannelArbitrator(%v): detected %s "+
×
1097
                                "close after closing channel, fast-forwarding "+
×
1098
                                "to %s to resolve contract",
×
1099
                                c.cfg.ChanPoint, trigger, StateFullyResolved)
×
1100
                        return StateFullyResolved, closeTx, nil
×
1101
                }
1102

1103
                log.Infof("ChannelArbitrator(%v): force closing "+
19✔
1104
                        "chan", c.cfg.ChanPoint)
19✔
1105

19✔
1106
                // Now that we have all the actions decided for the set of
19✔
1107
                // HTLC's, we'll broadcast the commitment transaction, and
19✔
1108
                // signal the link to exit.
19✔
1109

19✔
1110
                // We'll tell the switch that it should remove the link for
19✔
1111
                // this channel, in addition to fetching the force close
19✔
1112
                // summary needed to close this channel on chain.
19✔
1113
                forceCloseTx, err := c.cfg.Channel.ForceCloseChan()
19✔
1114
                if err != nil {
20✔
1115
                        log.Errorf("ChannelArbitrator(%v): unable to "+
1✔
1116
                                "force close: %v", c.cfg.ChanPoint, err)
1✔
1117

1✔
1118
                        // We tried to force close (HTLC may be expiring from
1✔
1119
                        // our PoV, etc), but we think we've lost data. In this
1✔
1120
                        // case, we'll not force close, but terminate the state
1✔
1121
                        // machine here to wait to see what confirms on chain.
1✔
1122
                        if errors.Is(err, lnwallet.ErrForceCloseLocalDataLoss) {
2✔
1123
                                log.Error("ChannelArbitrator(%v): broadcast "+
1✔
1124
                                        "failed due to local data loss, "+
1✔
1125
                                        "waiting for on chain confimation...",
1✔
1126
                                        c.cfg.ChanPoint)
1✔
1127

1✔
1128
                                return StateBroadcastCommit, nil, nil
1✔
1129
                        }
1✔
1130

1131
                        return StateError, closeTx, err
×
1132
                }
1133
                closeTx = forceCloseTx
18✔
1134

18✔
1135
                // Before publishing the transaction, we store it to the
18✔
1136
                // database, such that we can re-publish later in case it
18✔
1137
                // didn't propagate. We initiated the force close, so we
18✔
1138
                // mark broadcast with local initiator set to true.
18✔
1139
                err = c.cfg.MarkCommitmentBroadcasted(closeTx, lntypes.Local)
18✔
1140
                if err != nil {
18✔
1141
                        log.Errorf("ChannelArbitrator(%v): unable to "+
×
1142
                                "mark commitment broadcasted: %v",
×
1143
                                c.cfg.ChanPoint, err)
×
1144
                        return StateError, closeTx, err
×
1145
                }
×
1146

1147
                // With the close transaction in hand, broadcast the
1148
                // transaction to the network, thereby entering the post
1149
                // channel resolution state.
1150
                log.Infof("Broadcasting force close transaction %v, "+
18✔
1151
                        "ChannelPoint(%v): %v", closeTx.TxHash(),
18✔
1152
                        c.cfg.ChanPoint, lnutils.SpewLogClosure(closeTx))
18✔
1153

18✔
1154
                // At this point, we'll now broadcast the commitment
18✔
1155
                // transaction itself.
18✔
1156
                label := labels.MakeLabel(
18✔
1157
                        labels.LabelTypeChannelClose, &c.cfg.ShortChanID,
18✔
1158
                )
18✔
1159
                if err := c.cfg.PublishTx(closeTx, label); err != nil {
23✔
1160
                        log.Errorf("ChannelArbitrator(%v): unable to broadcast "+
5✔
1161
                                "close tx: %v", c.cfg.ChanPoint, err)
5✔
1162

5✔
1163
                        // This makes sure we don't fail at startup if the
5✔
1164
                        // commitment transaction has too low fees to make it
5✔
1165
                        // into mempool. The rebroadcaster makes sure this
5✔
1166
                        // transaction is republished regularly until confirmed
5✔
1167
                        // or replaced.
5✔
1168
                        if !errors.Is(err, lnwallet.ErrDoubleSpend) &&
5✔
1169
                                !errors.Is(err, lnwallet.ErrMempoolFee) {
7✔
1170

2✔
1171
                                return StateError, closeTx, err
2✔
1172
                        }
2✔
1173
                }
1174

1175
                // We go to the StateCommitmentBroadcasted state, where we'll
1176
                // be waiting for the commitment to be confirmed.
1177
                nextState = StateCommitmentBroadcasted
16✔
1178

1179
        // In this state we have broadcasted our own commitment, and will need
1180
        // to wait for a commitment (not necessarily the one we broadcasted!)
1181
        // to be confirmed.
1182
        case StateCommitmentBroadcasted:
30✔
1183
                switch trigger {
30✔
1184

1185
                // We are waiting for a commitment to be confirmed.
1186
                case chainTrigger, userTrigger:
17✔
1187
                        // The commitment transaction has been broadcast, but it
17✔
1188
                        // doesn't necessarily need to be the commitment
17✔
1189
                        // transaction version that is going to be confirmed. To
17✔
1190
                        // be sure that any of those versions can be anchored
17✔
1191
                        // down, we now submit all anchor resolutions to the
17✔
1192
                        // sweeper. The sweeper will keep trying to sweep all of
17✔
1193
                        // them.
17✔
1194
                        //
17✔
1195
                        // Note that the sweeper is idempotent. If we ever
17✔
1196
                        // happen to end up at this point in the code again, no
17✔
1197
                        // harm is done by re-offering the anchors to the
17✔
1198
                        // sweeper.
17✔
1199
                        anchors, err := c.cfg.Channel.NewAnchorResolutions()
17✔
1200
                        if err != nil {
17✔
1201
                                return StateError, closeTx, err
×
1202
                        }
×
1203

1204
                        err = c.sweepAnchors(anchors, triggerHeight)
17✔
1205
                        if err != nil {
17✔
1206
                                return StateError, closeTx, err
×
1207
                        }
×
1208

1209
                        nextState = StateCommitmentBroadcasted
17✔
1210

1211
                // If this state advance was triggered by any of the
1212
                // commitments being confirmed, then we'll jump to the state
1213
                // where the contract has been closed.
1214
                case localCloseTrigger, remoteCloseTrigger:
13✔
1215
                        nextState = StateContractClosed
13✔
1216

1217
                // If a coop close was confirmed, jump straight to the fully
1218
                // resolved state.
1219
                case coopCloseTrigger:
×
1220
                        nextState = StateFullyResolved
×
1221

1222
                case breachCloseTrigger:
×
1223
                        nextContractState, err := c.checkLegacyBreach()
×
1224
                        if nextContractState == StateError {
×
1225
                                return nextContractState, closeTx, err
×
1226
                        }
×
1227

1228
                        nextState = nextContractState
×
1229
                }
1230

1231
                log.Infof("ChannelArbitrator(%v): trigger %v moving from "+
30✔
1232
                        "state %v to %v", c.cfg.ChanPoint, trigger, c.state,
30✔
1233
                        nextState)
30✔
1234

1235
        // If we're in this state, then the contract has been fully closed to
1236
        // outside sub-systems, so we'll process the prior set of on-chain
1237
        // contract actions and launch a set of resolvers.
1238
        case StateContractClosed:
22✔
1239
                // First, we'll fetch our chain actions, and both sets of
22✔
1240
                // resolutions so we can process them.
22✔
1241
                contractResolutions, err := c.log.FetchContractResolutions()
22✔
1242
                if err != nil {
24✔
1243
                        log.Errorf("unable to fetch contract resolutions: %v",
2✔
1244
                                err)
2✔
1245
                        return StateError, closeTx, err
2✔
1246
                }
2✔
1247

1248
                // If the resolution is empty, and we have no HTLCs at all to
1249
                // send to, then we're done here. We don't need to launch any
1250
                // resolvers, and can go straight to our final state.
1251
                if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() {
28✔
1252
                        log.Infof("ChannelArbitrator(%v): contract "+
8✔
1253
                                "resolutions empty, marking channel as fully resolved!",
8✔
1254
                                c.cfg.ChanPoint)
8✔
1255
                        nextState = StateFullyResolved
8✔
1256
                        break
8✔
1257
                }
1258

1259
                // First, we'll reconstruct a fresh set of chain actions as the
1260
                // set of actions we need to act on may differ based on if it
1261
                // was our commitment or their commitment that hit the chain.
1262
                htlcActions, err := c.constructChainActions(
12✔
1263
                        confCommitSet, triggerHeight, trigger,
12✔
1264
                )
12✔
1265
                if err != nil {
12✔
1266
                        return StateError, closeTx, err
×
1267
                }
×
1268

1269
                // In case it's a breach transaction, we fail back all upstream
1270
                // HTLCs for their corresponding outgoing HTLCs on the remote
1271
                // commitment set (remote and remote pending set).
1272
                if contractResolutions.BreachResolution != nil {
14✔
1273
                        // cancelBreachedHTLCs is a set which holds HTLCs whose
2✔
1274
                        // corresponding incoming HTLCs will be failed back
2✔
1275
                        // because the peer broadcasted an old state.
2✔
1276
                        cancelBreachedHTLCs := fn.NewSet[uint64]()
2✔
1277

2✔
1278
                        // Using the CommitSet, we'll fail back all
2✔
1279
                        // upstream HTLCs for their corresponding outgoing
2✔
1280
                        // HTLC that exists on either of the remote commitments.
2✔
1281
                        // The map is used to deduplicate any shared HTLC's.
2✔
1282
                        for htlcSetKey, htlcs := range confCommitSet.HtlcSets {
4✔
1283
                                if !htlcSetKey.IsRemote {
2✔
1284
                                        continue
×
1285
                                }
1286

1287
                                for _, htlc := range htlcs {
4✔
1288
                                        // Only outgoing HTLCs have a
2✔
1289
                                        // corresponding incoming HTLC.
2✔
1290
                                        if htlc.Incoming {
3✔
1291
                                                continue
1✔
1292
                                        }
1293

1294
                                        cancelBreachedHTLCs.Add(htlc.HtlcIndex)
1✔
1295
                                }
1296
                        }
1297

1298
                        err := c.abandonForwards(cancelBreachedHTLCs)
2✔
1299
                        if err != nil {
2✔
1300
                                return StateError, closeTx, err
×
1301
                        }
×
1302
                } else {
10✔
1303
                        // If it's not a breach, we resolve all incoming dust
10✔
1304
                        // HTLCs immediately after the commitment is confirmed.
10✔
1305
                        err = c.failIncomingDust(
10✔
1306
                                htlcActions[HtlcIncomingDustFinalAction],
10✔
1307
                        )
10✔
1308
                        if err != nil {
10✔
1309
                                return StateError, closeTx, err
×
1310
                        }
×
1311

1312
                        // We fail the upstream HTLCs for all remote pending
1313
                        // outgoing HTLCs as soon as the commitment is
1314
                        // confirmed. The upstream HTLCs for outgoing dust
1315
                        // HTLCs have already been resolved before we reach
1316
                        // this point.
1317
                        getIdx := func(htlc channeldb.HTLC) uint64 {
18✔
1318
                                return htlc.HtlcIndex
8✔
1319
                        }
8✔
1320
                        remoteDangling := fn.NewSet(fn.Map(
10✔
1321
                                htlcActions[HtlcFailDanglingAction], getIdx,
10✔
1322
                        )...)
10✔
1323
                        err := c.abandonForwards(remoteDangling)
10✔
1324
                        if err != nil {
10✔
1325
                                return StateError, closeTx, err
×
1326
                        }
×
1327
                }
1328

1329
                // Now that we know we'll need to act, we'll process all the
1330
                // resolvers, then create the structures we need to resolve all
1331
                // outstanding contracts.
1332
                resolvers, err := c.prepContractResolutions(
12✔
1333
                        contractResolutions, triggerHeight, htlcActions,
12✔
1334
                )
12✔
1335
                if err != nil {
12✔
1336
                        log.Errorf("ChannelArbitrator(%v): unable to "+
×
1337
                                "resolve contracts: %v", c.cfg.ChanPoint, err)
×
1338
                        return StateError, closeTx, err
×
1339
                }
×
1340

1341
                log.Debugf("ChannelArbitrator(%v): inserting %v contract "+
12✔
1342
                        "resolvers", c.cfg.ChanPoint, len(resolvers))
12✔
1343

12✔
1344
                err = c.log.InsertUnresolvedContracts(nil, resolvers...)
12✔
1345
                if err != nil {
12✔
1346
                        return StateError, closeTx, err
×
1347
                }
×
1348

1349
                // Finally, we'll launch all the required contract resolvers.
1350
                // Once they're all resolved, we're no longer needed.
1351
                c.launchResolvers(resolvers, false)
12✔
1352

12✔
1353
                nextState = StateWaitingFullResolution
12✔
1354

1355
        // This is our terminal state. We'll keep returning this state until
1356
        // all contracts are fully resolved.
1357
        case StateWaitingFullResolution:
17✔
1358
                log.Infof("ChannelArbitrator(%v): still awaiting contract "+
17✔
1359
                        "resolution", c.cfg.ChanPoint)
17✔
1360

17✔
1361
                unresolved, err := c.log.FetchUnresolvedContracts()
17✔
1362
                if err != nil {
17✔
1363
                        return StateError, closeTx, err
×
1364
                }
×
1365

1366
                // If we have no unresolved contracts, then we can move to the
1367
                // final state.
1368
                if len(unresolved) == 0 {
29✔
1369
                        nextState = StateFullyResolved
12✔
1370
                        break
12✔
1371
                }
1372

1373
                // Otherwise, we still have unresolved contracts, so we'll
1374
                // stay alive to oversee their resolution.
1375
                nextState = StateWaitingFullResolution
5✔
1376

5✔
1377
                // Add debug logs.
5✔
1378
                for _, r := range unresolved {
10✔
1379
                        log.Debugf("ChannelArbitrator(%v): still have "+
5✔
1380
                                "unresolved contract: %T", c.cfg.ChanPoint, r)
5✔
1381
                }
5✔
1382

1383
        // If we start as fully resolved, then we'll end as fully resolved.
1384
        case StateFullyResolved:
22✔
1385
                // To ensure that the state of the contract in persistent
22✔
1386
                // storage is properly reflected, we'll mark the contract as
22✔
1387
                // fully resolved now.
22✔
1388
                nextState = StateFullyResolved
22✔
1389

22✔
1390
                log.Infof("ChannelPoint(%v) has been fully resolved "+
22✔
1391
                        "on-chain at height=%v", c.cfg.ChanPoint, triggerHeight)
22✔
1392

22✔
1393
                if err := c.cfg.MarkChannelResolved(); err != nil {
22✔
1394
                        log.Errorf("unable to mark channel resolved: %v", err)
×
1395
                        return StateError, closeTx, err
×
1396
                }
×
1397
        }
1398

1399
        log.Tracef("ChannelArbitrator(%v): next_state=%v", c.cfg.ChanPoint,
132✔
1400
                nextState)
132✔
1401

132✔
1402
        return nextState, closeTx, nil
132✔
1403
}
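
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it mirrors the error handling used above when the
// force close transaction is published, where "already spent/known" and
// "fee too low" style failures are tolerated because a rebroadcaster keeps
// re-publishing the commitment until it confirms or is replaced. The
// sentinel errors are hypothetical stand-ins for the lnwallet errors used in
// the real code.
var (
        exampleErrDoubleSpend = errors.New("transaction already spent")
        exampleErrMempoolFee  = errors.New("fee too low for mempool")
)

func exampleTolerantPublish(publish func() error) error {
        err := publish()
        switch {
        // Success, nothing more to do.
        case err == nil:
                return nil

        // Benign failures: keep advancing the state machine and rely on
        // periodic rebroadcast instead of treating this as fatal.
        case errors.Is(err, exampleErrDoubleSpend),
                errors.Is(err, exampleErrMempoolFee):

                return nil

        // Anything else is a hard failure.
        default:
                return err
        }
}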
1404

1405
// sweepAnchors offers all given anchor resolutions to the sweeper. It requests
1406
// sweeping at the minimum fee rate. This fee rate can be upped manually by the
1407
// user via the BumpFee rpc.
1408
func (c *ChannelArbitrator) sweepAnchors(anchors *lnwallet.AnchorResolutions,
1409
        heightHint uint32) error {
19✔
1410

19✔
1411
        // Update the set of activeHTLCs so that the sweeping routine has an
19✔
1412
        // up-to-date view of the set of commitments.
19✔
1413
        c.updateActiveHTLCs()
19✔
1414

19✔
1415
        // Prepare the sweeping requests for all possible versions of
19✔
1416
        // commitments.
19✔
1417
        sweepReqs, err := c.prepareAnchorSweeps(heightHint, anchors)
19✔
1418
        if err != nil {
19✔
1419
                return err
×
1420
        }
×
1421

1422
        // Send out the sweeping requests to the sweeper.
1423
        for _, req := range sweepReqs {
27✔
1424
                _, err = c.cfg.Sweeper.SweepInput(req.input, req.params)
8✔
1425
                if err != nil {
8✔
1426
                        return err
×
1427
                }
×
1428
        }
1429

1430
        return nil
19✔
1431
}
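
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it shows the shape of the loop above that offers
// every prepared anchor sweep request to the sweeper and aborts on the first
// error. The sweeper interface and request type here are hypothetical
// stand-ins for the real sweep package types.
type exampleAnchorSweeper interface {
        Offer(req string) error
}

func exampleOfferAnchors(s exampleAnchorSweeper, reqs []string) error {
        for _, req := range reqs {
                // Offering is idempotent in the real sweeper, so re-running
                // this loop after a restart does no harm.
                if err := s.Offer(req); err != nil {
                        return err
                }
        }

        return nil
}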
1432

1433
// findCommitmentDeadlineAndValue finds the deadline (relative block height)
1434
// for a commitment transaction by extracting the minimum CLTV from its HTLCs.
1435
// From our PoV, the deadline delta is defined to be the smaller of,
1436
//   - half of the least CLTV from outgoing HTLCs' corresponding incoming
1437
//     HTLCs,  or,
1438
//   - half of the least CLTV from incoming HTLCs if the preimage is available.
1439
//
1440
// We use half of the CLTV value to ensure that we have enough time to sweep
1441
// the second-level HTLCs.
1442
//
1443
// It also finds the total value that is time-sensitive, which is the sum of
1444
// all the outgoing HTLCs plus incoming HTLCs whose preimages are known. It
1445
// then returns the value left after subtracting the budget used for sweeping
1446
// the time-sensitive HTLCs.
1447
//
1448
// NOTE: when the deadline turns out to be 0 blocks, we will replace it with 1
1449
// block because our fee estimator doesn't allow a 0 conf target. This also
1450
// means we've fallen behind and should increase our fee to get the transaction
1451
// confirmed asap.
1452
func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
1453
        htlcs htlcSet) (fn.Option[int32], btcutil.Amount, error) {
13✔
1454

13✔
1455
        deadlineMinHeight := uint32(math.MaxUint32)
13✔
1456
        totalValue := btcutil.Amount(0)
13✔
1457

13✔
1458
        // First, iterate through the outgoingHTLCs to find the lowest CLTV
13✔
1459
        // value.
13✔
1460
        for _, htlc := range htlcs.outgoingHTLCs {
23✔
1461
                // Skip if the HTLC is dust.
10✔
1462
                if htlc.OutputIndex < 0 {
13✔
1463
                        log.Debugf("ChannelArbitrator(%v): skipped deadline "+
3✔
1464
                                "for dust htlc=%x",
3✔
1465
                                c.cfg.ChanPoint, htlc.RHash[:])
3✔
1466

3✔
1467
                        continue
3✔
1468
                }
1469

1470
                value := htlc.Amt.ToSatoshis()
7✔
1471

7✔
1472
                // Find the expiry height for this outgoing HTLC's incoming
7✔
1473
                // HTLC.
7✔
1474
                deadlineOpt := c.cfg.FindOutgoingHTLCDeadline(htlc)
7✔
1475

7✔
1476
                // The deadline defaults to the current deadlineMinHeight,
7✔
1477
                // and is overwritten whenever the option is not None.
7✔
1478
                deadline := deadlineMinHeight
7✔
1479
                deadlineOpt.WhenSome(func(d int32) {
12✔
1480
                        deadline = uint32(d)
5✔
1481

5✔
1482
                        // We only consider the value is under protection when
5✔
1483
                        // it's time-sensitive.
5✔
1484
                        totalValue += value
5✔
1485
                })
5✔
1486

1487
                if deadline < deadlineMinHeight {
12✔
1488
                        deadlineMinHeight = deadline
5✔
1489

5✔
1490
                        log.Tracef("ChannelArbitrator(%v): outgoing HTLC has "+
5✔
1491
                                "deadline=%v, value=%v", c.cfg.ChanPoint,
5✔
1492
                                deadlineMinHeight, value)
5✔
1493
                }
5✔
1494
        }
1495

1496
        // Then we go through the incomingHTLCs and update the minHeight when
1497
        // the conditions are met.
1498
        for _, htlc := range htlcs.incomingHTLCs {
22✔
1499
                // Skip if the HTLC is dust.
9✔
1500
                if htlc.OutputIndex < 0 {
10✔
1501
                        log.Debugf("ChannelArbitrator(%v): skipped deadline "+
1✔
1502
                                "for dust htlc=%x",
1✔
1503
                                c.cfg.ChanPoint, htlc.RHash[:])
1✔
1504

1✔
1505
                        continue
1✔
1506
                }
1507

1508
                // Since it's an HTLC sent to us, check if we have preimage for
1509
                // this HTLC.
1510
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
8✔
1511
                if err != nil {
8✔
1512
                        return fn.None[int32](), 0, err
×
1513
                }
×
1514

1515
                if !preimageAvailable {
10✔
1516
                        continue
2✔
1517
                }
1518

1519
                value := htlc.Amt.ToSatoshis()
6✔
1520
                totalValue += value
6✔
1521

6✔
1522
                if htlc.RefundTimeout < deadlineMinHeight {
11✔
1523
                        deadlineMinHeight = htlc.RefundTimeout
5✔
1524

5✔
1525
                        log.Tracef("ChannelArbitrator(%v): incoming HTLC has "+
5✔
1526
                                "deadline=%v, amt=%v", c.cfg.ChanPoint,
5✔
1527
                                deadlineMinHeight, value)
5✔
1528
                }
5✔
1529
        }
1530

1531
        // Calculate the deadline. There are two cases to be handled here,
1532
        //   - when the deadlineMinHeight never gets updated, which could
1533
        //     happen when we have no outgoing HTLCs, and, for incoming HTLCs,
1534
        //       * either we have none, or,
1535
        //       * none of the HTLCs are preimageAvailable.
1536
        //   - when our deadlineMinHeight is no greater than the heightHint,
1537
        //     which means we are behind our schedule.
1538
        var deadline uint32
13✔
1539
        switch {
13✔
1540
        // When we couldn't find a deadline height from our HTLCs, we will fall
1541
        // back to the default value as there's no time pressure here.
1542
        case deadlineMinHeight == math.MaxUint32:
4✔
1543
                return fn.None[int32](), 0, nil
4✔
1544

1545
        // When the deadline is passed, we will fall back to the smallest conf
1546
        // target (1 block).
1547
        case deadlineMinHeight <= heightHint:
1✔
1548
                log.Warnf("ChannelArbitrator(%v): deadline is passed with "+
1✔
1549
                        "deadlineMinHeight=%d, heightHint=%d",
1✔
1550
                        c.cfg.ChanPoint, deadlineMinHeight, heightHint)
1✔
1551
                deadline = 1
1✔
1552

1553
        // Use half of the deadline delta, and leave the other half to be used
1554
        // to sweep the HTLCs.
1555
        default:
8✔
1556
                deadline = (deadlineMinHeight - heightHint) / 2
8✔
1557
        }
1558

1559
        // Calculate the value left after subtracting the budget used for
1560
        // sweeping the time-sensitive HTLCs.
1561
        valueLeft := totalValue - calculateBudget(
9✔
1562
                totalValue, c.cfg.Budget.DeadlineHTLCRatio,
9✔
1563
                c.cfg.Budget.DeadlineHTLC,
9✔
1564
        )
9✔
1565

9✔
1566
        log.Debugf("ChannelArbitrator(%v): calculated valueLeft=%v, "+
9✔
1567
                "deadline=%d, using deadlineMinHeight=%d, heightHint=%d",
9✔
1568
                c.cfg.ChanPoint, valueLeft, deadline, deadlineMinHeight,
9✔
1569
                heightHint)
9✔
1570

9✔
1571
        return fn.Some(int32(deadline)), valueLeft, nil
9✔
1572
}
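
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it condenses the deadline rule implemented above.
// Given the current height and the minimum expiry found across the relevant
// HTLCs, half of the remaining delta is used as the conf target (the other
// half is reserved for sweeping the second-level HTLC transactions), clamped
// to 1 block once the deadline has passed.
func exampleDeadlineDelta(heightHint, deadlineMinHeight uint32) uint32 {
        switch {
        // The deadline has already passed: use the smallest conf target.
        case deadlineMinHeight <= heightHint:
                return 1

        // Otherwise leave half of the delta for the second-level sweeps.
        default:
                return (deadlineMinHeight - heightHint) / 2
        }
}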
1573

1574
// launchResolvers updates the activeResolvers list and starts the resolvers.
1575
func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver,
1576
        immediate bool) {
13✔
1577

13✔
1578
        c.activeResolversLock.Lock()
13✔
1579
        defer c.activeResolversLock.Unlock()
13✔
1580

13✔
1581
        c.activeResolvers = resolvers
13✔
1582
        for _, contract := range resolvers {
19✔
1583
                c.wg.Add(1)
6✔
1584
                go c.resolveContract(contract, immediate)
6✔
1585
        }
6✔
1586
}
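
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it shows the same launch pattern used above, where
// each resolver-style task is registered on a WaitGroup before its goroutine
// starts so that a later shutdown can wait for all of them to exit.
func exampleLaunch(wg *sync.WaitGroup, tasks []func()) {
        for _, task := range tasks {
                wg.Add(1)
                go func(run func()) {
                        defer wg.Done()
                        run()
                }(task)
        }
}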
1587

1588
// advanceState is the main driver of our state machine. This method is an
1589
// iterative function which repeatedly attempts to advance the internal state
1590
// of the channel arbitrator. The state will be advanced until we reach a
1591
// redundant transition, meaning that the state transition is a noop. The final
1592
// param is the confirmed commitment set (if any) that applies to this
1593
// round of state transitions.
1594
func (c *ChannelArbitrator) advanceState(
1595
        triggerHeight uint32, trigger transitionTrigger,
1596
        confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
89✔
1597

89✔
1598
        var (
89✔
1599
                priorState   ArbitratorState
89✔
1600
                forceCloseTx *wire.MsgTx
89✔
1601
        )
89✔
1602

89✔
1603
        // We'll continue to advance our state forward until the state we
89✔
1604
        // transition to is that same state that we started at.
89✔
1605
        for {
264✔
1606
                priorState = c.state
175✔
1607
                log.Debugf("ChannelArbitrator(%v): attempting state step with "+
175✔
1608
                        "trigger=%v from state=%v", c.cfg.ChanPoint, trigger,
175✔
1609
                        priorState)
175✔
1610

175✔
1611
                nextState, closeTx, err := c.stateStep(
175✔
1612
                        triggerHeight, trigger, confCommitSet,
175✔
1613
                )
175✔
1614
                if err != nil {
179✔
1615
                        log.Errorf("ChannelArbitrator(%v): unable to advance "+
4✔
1616
                                "state: %v", c.cfg.ChanPoint, err)
4✔
1617
                        return priorState, nil, err
4✔
1618
                }
4✔
1619

1620
                if forceCloseTx == nil && closeTx != nil {
187✔
1621
                        forceCloseTx = closeTx
16✔
1622
                }
16✔
1623

1624
                // Our termination transition is a noop transition. If we get
1625
                // our prior state back as the next state, then we'll
1626
                // terminate.
1627
                if nextState == priorState {
253✔
1628
                        log.Debugf("ChannelArbitrator(%v): terminating at "+
82✔
1629
                                "state=%v", c.cfg.ChanPoint, nextState)
82✔
1630
                        return nextState, forceCloseTx, nil
82✔
1631
                }
82✔
1632

1633
                // As the prior state was successfully executed, we can now
1634
                // commit the next state. This ensures that we will re-execute
1635
                // the prior state if anything fails.
1636
                if err := c.log.CommitState(nextState); err != nil {
92✔
1637
                        log.Errorf("ChannelArbitrator(%v): unable to commit "+
3✔
1638
                                "next state(%v): %v", c.cfg.ChanPoint,
3✔
1639
                                nextState, err)
3✔
1640
                        return priorState, nil, err
3✔
1641
                }
3✔
1642
                c.state = nextState
86✔
1643
        }
1644
}
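
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it captures the driver loop above in miniature.
// The state is stepped repeatedly, every successful transition is committed
// before it is adopted, and the loop terminates as soon as a step returns
// the state it started from (a noop transition).
func exampleAdvance(start int, step func(int) (int, error),
        commit func(int) error) (int, error) {

        state := start
        for {
                next, err := step(state)
                if err != nil {
                        return state, err
                }

                // A noop transition is the termination condition.
                if next == state {
                        return state, nil
                }

                // Persist the transition before adopting it, so a restart
                // re-executes the prior state instead of skipping it.
                if err := commit(next); err != nil {
                        return state, err
                }

                state = next
        }
}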
1645

1646
// ChainAction is an enum that encompasses all possible on-chain actions
1647
// we'll take for a set of HTLC's.
1648
type ChainAction uint8
1649

1650
const (
1651
        // NoAction is the min chainAction type, indicating that no action
1652
        // needs to be taken for a given HTLC.
1653
        NoAction ChainAction = 0
1654

1655
        // HtlcTimeoutAction indicates that the HTLC will timeout soon. As a
1656
        // result, we should get ready to sweep it on chain after the timeout.
1657
        HtlcTimeoutAction = 1
1658

1659
        // HtlcClaimAction indicates that we should claim the HTLC on chain
1660
        // before its timeout period.
1661
        HtlcClaimAction = 2
1662

1663
        // HtlcFailDustAction indicates that we should fail the upstream HTLC
1664
        // for an outgoing dust HTLC immediately (even before the commitment
1665
        // transaction is confirmed) because it has no output on the commitment
1666
        // transaction. This also includes remote pending outgoing dust HTLCs.
1667
        HtlcFailDustAction = 3
1668

1669
        // HtlcOutgoingWatchAction indicates that we can't yet timeout this
1670
        // HTLC, but we had to go to chain in order to resolve an existing
1671
        // HTLC.  In this case, we'll either: time it out once it expires, or
1672
        // will learn the pre-image if the remote party claims the output. In
1673
        // this case, we'll add the pre-image to our global store.
1674
        HtlcOutgoingWatchAction = 4
1675

1676
        // HtlcIncomingWatchAction indicates that we don't yet have the
1677
        // pre-image to claim the incoming HTLC, but we had to go to chain in order
1678
        // to resolve an existing HTLC. In this case, we'll either: let the
1679
        // other party time it out, or eventually learn of the pre-image, in
1680
        // which case we'll claim on chain.
1681
        HtlcIncomingWatchAction = 5
1682

1683
        // HtlcIncomingDustFinalAction indicates that we should mark an incoming
1684
        // dust htlc as final because it can't be claimed on-chain.
1685
        HtlcIncomingDustFinalAction = 6
1686

1687
        // HtlcFailDanglingAction indicates that we should fail the upstream
1688
        // HTLC for an outgoing HTLC immediately after the commitment
1689
        // transaction has confirmed because it has no corresponding output on
1690
        // the commitment transaction. This category does NOT include any dust
1691
        // HTLCs which are mapped in the "HtlcFailDustAction" category.
1692
        HtlcFailDanglingAction = 7
1693
)
1694

1695
// String returns a human readable string describing a chain action.
1696
func (c ChainAction) String() string {
×
1697
        switch c {
×
1698
        case NoAction:
×
1699
                return "NoAction"
×
1700

1701
        case HtlcTimeoutAction:
×
1702
                return "HtlcTimeoutAction"
×
1703

1704
        case HtlcClaimAction:
×
1705
                return "HtlcClaimAction"
×
1706

1707
        case HtlcFailDustAction:
×
1708
                return "HtlcFailDustAction"
×
1709

1710
        case HtlcOutgoingWatchAction:
×
1711
                return "HtlcOutgoingWatchAction"
×
1712

1713
        case HtlcIncomingWatchAction:
×
1714
                return "HtlcIncomingWatchAction"
×
1715

1716
        case HtlcIncomingDustFinalAction:
×
1717
                return "HtlcIncomingDustFinalAction"
×
1718

1719
        case HtlcFailDanglingAction:
×
1720
                return "HtlcFailDanglingAction"
×
1721

1722
        default:
×
1723
                return "<unknown action>"
×
1724
        }
1725
}
1726

1727
// ChainActionMap is a map of a chain action, to the set of HTLC's that need to
1728
// be acted upon for a given action type.
1729
type ChainActionMap map[ChainAction][]channeldb.HTLC
1730

1731
// Merge merges the passed chain actions with the target chain action map.
1732
func (c ChainActionMap) Merge(actions ChainActionMap) {
69✔
1733
        for chainAction, htlcs := range actions {
82✔
1734
                c[chainAction] = append(c[chainAction], htlcs...)
13✔
1735
        }
13✔
1736
}
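
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it shows how Merge combines two action maps by
// appending the HTLC slices per action, which is how the local and
// remote-dangling action sets are combined elsewhere in this file.
func exampleMergeUsage(local, remote ChainActionMap) ChainActionMap {
        merged := make(ChainActionMap)
        merged.Merge(local)
        merged.Merge(remote)

        return merged
}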
1737

1738
// shouldGoOnChain takes into account the absolute timeout of the HTLC and
1739
// whether the confirmation delta we need is close, and returns a bool indicating if
1740
// we should go on chain to claim.  We do this rather than waiting up until the
1741
// last minute as we want to ensure that when we *need* (HTLC is timed out) to
1742
// sweep, the commitment is already confirmed.
1743
func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC,
1744
        broadcastDelta, currentHeight uint32) bool {
28✔
1745

28✔
1746
        // We'll calculate the broadcast cut off for this HTLC. This is the
28✔
1747
        // height that (based on our current fee estimation) we should
28✔
1748
        // broadcast in order to ensure the commitment transaction is confirmed
28✔
1749
        // before the HTLC fully expires.
28✔
1750
        broadcastCutOff := htlc.RefundTimeout - broadcastDelta
28✔
1751

28✔
1752
        log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+
28✔
1753
                "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout,
28✔
1754
                broadcastCutOff, currentHeight)
28✔
1755

28✔
1756
        // TODO(roasbeef): take into account default HTLC delta, don't need to
28✔
1757
        // broadcast immediately
28✔
1758
        //  * can then batch with SINGLE | ANYONECANPAY
28✔
1759

28✔
1760
        // We should go on-chain for this HTLC iff we're within our broadcast
28✔
1761
        // cutoff window.
28✔
1762
        if currentHeight < broadcastCutOff {
49✔
1763
                return false
21✔
1764
        }
21✔
1765

1766
        // In case of an incoming HTLC, we should go to chain.
1767
        if htlc.Incoming {
7✔
1768
                return true
×
1769
        }
×
1770

1771
        // For HTLCs that are the result of payments we initiated, we give some grace
1772
        // period before force closing the channel. During this time we expect
1773
        // both nodes to connect and give a chance to the other node to send its
1774
        // updates and cancel the htlc.
1775
        // This shouldn't add any security risk as there is no incoming htlc to
1776
        // fulfill in this case, and the expectation is that when the channel is
1777
        // active the other node will send update_fail_htlc to remove the htlc
1778
        // without closing the channel. It is up to the user to force close the
1779
        // channel if the peer misbehaves and doesn't send the update_fail_htlc.
1780
        // It is useful when this node is most of the time not online and is
1781
        // likely to miss the time slot where the htlc may be cancelled.
1782
        isForwarded := c.cfg.IsForwardedHTLC(c.cfg.ShortChanID, htlc.HtlcIndex)
7✔
1783
        upTime := c.cfg.Clock.Now().Sub(c.startTimestamp)
7✔
1784
        return isForwarded || upTime > c.cfg.PaymentsExpirationGracePeriod
7✔
1785
}
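
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it restates the cutoff rule applied above. We go
// on-chain once the current height reaches the HTLC expiry minus the
// configured broadcast delta, so the commitment has time to confirm before
// the HTLC actually times out.
func exampleReachedBroadcastCutOff(refundTimeout, broadcastDelta,
        currentHeight uint32) bool {

        broadcastCutOff := refundTimeout - broadcastDelta

        // Go on-chain iff we've reached the cutoff height.
        return currentHeight >= broadcastCutOff
}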
1786

1787
// checkCommitChainActions is called for each new block connected to the end of
1788
// the main chain. Given the new block height, this method will examine all
1789
// active HTLC's, and determine if we need to go on-chain to claim any of them.
1790
// A map of action -> []htlc is returned, detailing what action (if any) should
1791
// be performed for each HTLC. For timed out HTLC's, once the commitment has
1792
// been sufficiently confirmed, the HTLC's should be canceled backwards. For
1793
// redeemed HTLC's, we should send the pre-image back to the incoming link.
1794
func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
1795
        trigger transitionTrigger, htlcs htlcSet) (ChainActionMap, error) {
69✔
1796

69✔
1797
        // TODO(roasbeef): would need to lock channel? channel totem?
69✔
1798
        //  * race condition if adding and we broadcast, etc
69✔
1799
        //  * or would make each instance sync?
69✔
1800

69✔
1801
        log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+
69✔
1802
                "height=%v, in_htlc_count=%v, out_htlc_count=%v",
69✔
1803
                c.cfg.ChanPoint, height,
69✔
1804
                len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs))
69✔
1805

69✔
1806
        actionMap := make(ChainActionMap)
69✔
1807

69✔
1808
        // First, we'll make an initial pass over the set of incoming and
69✔
1809
        // outgoing HTLC's to decide if we need to go on chain at all.
69✔
1810
        haveChainActions := false
69✔
1811
        for _, htlc := range htlcs.outgoingHTLCs {
77✔
1812
                // We'll need to go on-chain for an outgoing HTLC if it was
8✔
1813
                // never resolved downstream, and it's "close" to timing out.
8✔
1814
                //
8✔
1815
                // TODO(yy): If there's no corresponding incoming HTLC, it
8✔
1816
                // means we are the first hop, hence the payer. This is a
8✔
1817
                // tricky case - unlike a forwarding hop, we don't have an
8✔
1818
                // incoming HTLC that will time out, which means as long as we
8✔
1819
                // can learn the preimage, we can settle the invoice (before it
8✔
1820
                // expires?).
8✔
1821
                toChain := c.shouldGoOnChain(
8✔
1822
                        htlc, c.cfg.OutgoingBroadcastDelta, height,
8✔
1823
                )
8✔
1824

8✔
1825
                if toChain {
8✔
1826
                        // Convert to int64 in case of overflow.
×
1827
                        remainingBlocks := int64(htlc.RefundTimeout) -
×
1828
                                int64(height)
×
1829

×
1830
                        log.Infof("ChannelArbitrator(%v): go to chain for "+
×
1831
                                "outgoing htlc %x: timeout=%v, amount=%v, "+
×
1832
                                "blocks_until_expiry=%v, broadcast_delta=%v",
×
1833
                                c.cfg.ChanPoint, htlc.RHash[:],
×
1834
                                htlc.RefundTimeout, htlc.Amt, remainingBlocks,
×
1835
                                c.cfg.OutgoingBroadcastDelta,
×
1836
                        )
×
1837
                }
×
1838

1839
                haveChainActions = haveChainActions || toChain
8✔
1840
        }
1841

1842
        for _, htlc := range htlcs.incomingHTLCs {
75✔
1843
                // We'll need to go on-chain to pull an incoming HTLC iff we
6✔
1844
                // know the pre-image and it's close to timing out. We need to
6✔
1845
                // ensure that we claim the funds that are rightfully ours
6✔
1846
                // on-chain.
6✔
1847
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
6✔
1848
                if err != nil {
6✔
1849
                        return nil, err
×
1850
                }
×
1851

1852
                if !preimageAvailable {
10✔
1853
                        continue
4✔
1854
                }
1855

1856
                toChain := c.shouldGoOnChain(
2✔
1857
                        htlc, c.cfg.IncomingBroadcastDelta, height,
2✔
1858
                )
2✔
1859

2✔
1860
                if toChain {
2✔
1861
                        // Convert to int64 in case of overflow.
×
1862
                        remainingBlocks := int64(htlc.RefundTimeout) -
×
1863
                                int64(height)
×
1864

×
1865
                        log.Infof("ChannelArbitrator(%v): go to chain for "+
×
1866
                                "incoming htlc %x: timeout=%v, amount=%v, "+
×
1867
                                "blocks_until_expiry=%v, broadcast_delta=%v",
×
1868
                                c.cfg.ChanPoint, htlc.RHash[:],
×
1869
                                htlc.RefundTimeout, htlc.Amt, remainingBlocks,
×
1870
                                c.cfg.IncomingBroadcastDelta,
×
1871
                        )
×
1872
                }
×
1873

1874
                haveChainActions = haveChainActions || toChain
2✔
1875
        }
1876

1877
        // If we don't have any actions to make, then we'll return an empty
1878
        // action map. We only do this if this was a chain trigger though, as
1879
        // if we're going to broadcast the commitment (or the remote party did)
1880
        // we're *forced* to act on each HTLC.
1881
        if !haveChainActions && trigger == chainTrigger {
110✔
1882
                log.Tracef("ChannelArbitrator(%v): no actions to take at "+
41✔
1883
                        "height=%v", c.cfg.ChanPoint, height)
41✔
1884
                return actionMap, nil
41✔
1885
        }
41✔
1886

1887
        // Now that we know we'll need to go on-chain, we'll examine all of our
1888
        // active outgoing HTLC's to see if we either need to: sweep them after
1889
        // a timeout (then cancel backwards), cancel them backwards
1890
        // immediately, or watch them as they're still active contracts.
1891
        for _, htlc := range htlcs.outgoingHTLCs {
35✔
1892
                switch {
7✔
1893
                // If the HTLC is dust, then we can cancel it backwards
1894
                // immediately as there's no matching contract to arbitrate
1895
                // on-chain. We know the HTLC is dust if the OutputIndex is
1896
                // negative.
1897
                case htlc.OutputIndex < 0:
2✔
1898
                        log.Tracef("ChannelArbitrator(%v): immediately "+
2✔
1899
                                "failing dust htlc=%x", c.cfg.ChanPoint,
2✔
1900
                                htlc.RHash[:])
2✔
1901

2✔
1902
                        actionMap[HtlcFailDustAction] = append(
2✔
1903
                                actionMap[HtlcFailDustAction], htlc,
2✔
1904
                        )
2✔
1905

1906
                // If we don't need to immediately act on this HTLC, then we'll
1907
                // mark it still "live". After we broadcast, we'll monitor it
1908
                // until the HTLC times out to see if we can also redeem it
1909
                // on-chain.
1910
                case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
1911
                        height,
1912
                ):
5✔
1913
                        // TODO(roasbeef): also need to be able to query
5✔
1914
                        // circuit map to see if HTLC hasn't been fully
5✔
1915
                        // resolved
5✔
1916
                        //
5✔
1917
                        //  * can't fail incoming until if outgoing not yet
5✔
1918
                        //  failed
5✔
1919

5✔
1920
                        log.Tracef("ChannelArbitrator(%v): watching chain to "+
5✔
1921
                                "decide action for outgoing htlc=%x",
5✔
1922
                                c.cfg.ChanPoint, htlc.RHash[:])
5✔
1923

5✔
1924
                        actionMap[HtlcOutgoingWatchAction] = append(
5✔
1925
                                actionMap[HtlcOutgoingWatchAction], htlc,
5✔
1926
                        )
5✔
1927

1928
                // Otherwise, we'll update our actionMap to mark that we need
1929
                // to sweep this HTLC on-chain
1930
                default:
×
1931
                        log.Tracef("ChannelArbitrator(%v): going on-chain to "+
×
1932
                                "timeout htlc=%x", c.cfg.ChanPoint, htlc.RHash[:])
×
1933

×
1934
                        actionMap[HtlcTimeoutAction] = append(
×
1935
                                actionMap[HtlcTimeoutAction], htlc,
×
1936
                        )
×
1937
                }
1938
        }
1939

1940
        // Similarly, for each incoming HTLC, now that we need to go on-chain,
1941
        // we'll either: sweep it immediately if we know the pre-image, or
1942
        // observe the output on-chain if we don't. In this latter case, we'll
1943
        // either learn of it eventually from the outgoing HTLC, or the sender
1944
        // will timeout the HTLC.
1945
        for _, htlc := range htlcs.incomingHTLCs {
33✔
1946
                // If the HTLC is dust, there is no action to be taken.
5✔
1947
                if htlc.OutputIndex < 0 {
7✔
1948
                        log.Debugf("ChannelArbitrator(%v): no resolution "+
2✔
1949
                                "needed for incoming dust htlc=%x",
2✔
1950
                                c.cfg.ChanPoint, htlc.RHash[:])
2✔
1951

2✔
1952
                        actionMap[HtlcIncomingDustFinalAction] = append(
2✔
1953
                                actionMap[HtlcIncomingDustFinalAction], htlc,
2✔
1954
                        )
2✔
1955

2✔
1956
                        continue
2✔
1957
                }
1958

1959
                log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
3✔
1960
                        "action for incoming htlc=%x", c.cfg.ChanPoint,
3✔
1961
                        htlc.RHash[:])
3✔
1962

3✔
1963
                actionMap[HtlcIncomingWatchAction] = append(
3✔
1964
                        actionMap[HtlcIncomingWatchAction], htlc,
3✔
1965
                )
3✔
1966
        }
1967

1968
        return actionMap, nil
28✔
1969
}
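
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it mirrors the per-HTLC decision above for
// outgoing HTLCs once we know we're going on-chain. Dust HTLCs (negative
// output index) are failed back immediately, HTLCs that aren't close to
// expiry are merely watched, and anything else is swept via the timeout
// path.
func exampleClassifyOutgoing(outputIndex int32, closeToExpiry bool) ChainAction {
        switch {
        case outputIndex < 0:
                return HtlcFailDustAction

        case !closeToExpiry:
                return HtlcOutgoingWatchAction

        default:
                return HtlcTimeoutAction
        }
}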
1970

1971
// isPreimageAvailable returns whether the hash preimage is available in either
1972
// the preimage cache or the invoice database.
1973
func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool,
1974
        error) {
18✔
1975

18✔
1976
        // Start by checking the preimage cache for preimages of
18✔
1977
        // forwarded HTLCs.
18✔
1978
        _, preimageAvailable := c.cfg.PreimageDB.LookupPreimage(
18✔
1979
                hash,
18✔
1980
        )
18✔
1981
        if preimageAvailable {
26✔
1982
                return true, nil
8✔
1983
        }
8✔
1984

1985
        // Then check if we have an invoice that can be settled by this HTLC.
1986
        //
1987
        // TODO(joostjager): Check that there are still more blocks remaining
1988
        // than the invoice cltv delta. We don't want to go to chain only to
1989
        // have the incoming contest resolver decide that we don't want to
1990
        // settle this invoice.
1991
        invoice, err := c.cfg.Registry.LookupInvoice(context.Background(), hash)
10✔
1992
        switch {
10✔
1993
        case err == nil:
×
1994
        case errors.Is(err, invoices.ErrInvoiceNotFound) ||
1995
                errors.Is(err, invoices.ErrNoInvoicesCreated):
10✔
1996

10✔
1997
                return false, nil
10✔
1998
        default:
×
1999
                return false, err
×
2000
        }
2001

2002
        preimageAvailable = invoice.Terms.PaymentPreimage != nil
×
2003

×
2004
        return preimageAvailable, nil
×
2005
}
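
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it shows the lookup order used above, where the
// preimage cache for forwarded HTLCs is consulted first and the invoice
// registry is only queried on a cache miss. Both lookup callbacks are
// hypothetical stand-ins.
func exampleTwoTierLookup(hash lntypes.Hash,
        cacheLookup func(lntypes.Hash) bool,
        invoiceLookup func(lntypes.Hash) (bool, error)) (bool, error) {

        // Cheap check first: preimages learned while forwarding.
        if cacheLookup(hash) {
                return true, nil
        }

        // Fall back to the invoice database for HTLCs paying our invoices.
        return invoiceLookup(hash)
}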
2006

2007
// checkLocalChainActions is similar to checkCommitChainActions, but it also
2008
// examines the set of HTLCs on the remote party's commitment. This allows us
2009
// to ensure we're able to satisfy the HTLC timeout constraints for incoming vs
2010
// outgoing HTLCs.
2011
func (c *ChannelArbitrator) checkLocalChainActions(
2012
        height uint32, trigger transitionTrigger,
2013
        activeHTLCs map[HtlcSetKey]htlcSet,
2014
        commitsConfirmed bool) (ChainActionMap, error) {
61✔
2015

61✔
2016
        // First, we'll check our local chain actions as normal. This will only
61✔
2017
        // examine HTLCs on our local commitment (timeout or settle).
61✔
2018
        localCommitActions, err := c.checkCommitChainActions(
61✔
2019
                height, trigger, activeHTLCs[LocalHtlcSet],
61✔
2020
        )
61✔
2021
        if err != nil {
61✔
2022
                return nil, err
×
2023
        }
×
2024

2025
        // Next, we'll examine the remote commitment (and maybe a dangling one)
2026
        // to see if the set difference of our HTLCs is non-empty. If so, then
2027
        // we may need to cancel back some HTLCs if we decide to go to chain.
2028
        remoteDanglingActions := c.checkRemoteDanglingActions(
61✔
2029
                height, activeHTLCs, commitsConfirmed,
61✔
2030
        )
61✔
2031

61✔
2032
        // Finally, we'll merge the two set of chain actions.
61✔
2033
        localCommitActions.Merge(remoteDanglingActions)
61✔
2034

61✔
2035
        return localCommitActions, nil
61✔
2036
}
2037

2038
// checkRemoteDanglingActions examines the set of remote commitments for any
2039
// HTLCs that are close to timing out. If we find any, then we'll return a set
2040
// of chain actions for HTLCs that are on their commitment, but not ours, to
2041
// cancel immediately.
2042
func (c *ChannelArbitrator) checkRemoteDanglingActions(
2043
        height uint32, activeHTLCs map[HtlcSetKey]htlcSet,
2044
        commitsConfirmed bool) ChainActionMap {
61✔
2045

61✔
2046
        var (
61✔
2047
                pendingRemoteHTLCs []channeldb.HTLC
61✔
2048
                localHTLCs         = make(map[uint64]struct{})
61✔
2049
                remoteHTLCs        = make(map[uint64]channeldb.HTLC)
61✔
2050
                actionMap          = make(ChainActionMap)
61✔
2051
        )
61✔
2052

61✔
2053
        // First, we'll construct two sets of the outgoing HTLCs: those on our
61✔
2054
        // local commitment, and those that are on the remote commitment(s).
61✔
2055
        for htlcSetKey, htlcs := range activeHTLCs {
180✔
2056
                if htlcSetKey.IsRemote {
182✔
2057
                        for _, htlc := range htlcs.outgoingHTLCs {
78✔
2058
                                remoteHTLCs[htlc.HtlcIndex] = htlc
15✔
2059
                        }
15✔
2060
                } else {
56✔
2061
                        for _, htlc := range htlcs.outgoingHTLCs {
62✔
2062
                                localHTLCs[htlc.HtlcIndex] = struct{}{}
6✔
2063
                        }
6✔
2064
                }
2065
        }
2066

2067
        // With both sets constructed, we'll now compute the set difference of
2068
        // our two sets of HTLCs. This'll give us the HTLCs that exist on the
2069
        // remote commitment transaction, but not on ours.
2070
        for htlcIndex, htlc := range remoteHTLCs {
76✔
2071
                if _, ok := localHTLCs[htlcIndex]; ok {
17✔
2072
                        continue
2✔
2073
                }
2074

2075
                pendingRemoteHTLCs = append(pendingRemoteHTLCs, htlc)
13✔
2076
        }
2077

2078
        // Finally, we'll examine all the pending remote HTLCs for those that
2079
        // have expired. If we find any, then we'll recommend that they be
2080
        // failed now so we can free up the incoming HTLC.
2081
        for _, htlc := range pendingRemoteHTLCs {
74✔
2082
                // We'll now check if we need to go to chain in order to cancel
13✔
2083
                // the incoming HTLC.
13✔
2084
                goToChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
13✔
2085
                        height,
13✔
2086
                )
13✔
2087

13✔
2088
                // If we don't need to go to chain, and no commitments have
13✔
2089
                // been confirmed, then we can move on. Otherwise, if
13✔
2090
                // commitments have been confirmed, then we need to cancel back
13✔
2091
                // *all* of the pending remote HTLCs.
13✔
2092
                if !goToChain && !commitsConfirmed {
17✔
2093
                        continue
4✔
2094
                }
2095

2096
                // Dust htlcs can be canceled back even before the commitment
2097
                // transaction confirms. Dust htlcs are not enforceable onchain.
2098
                // If another version of the commit tx would confirm we either
2099
                // gain or lose those dust amounts, but there is no option other
2100
                // than cancelling back the incoming HTLC, because we will never learn
2101
                // the preimage.
2102
                if htlc.OutputIndex < 0 {
9✔
2103
                        log.Infof("ChannelArbitrator(%v): fail dangling dust "+
×
2104
                                "htlc=%x from local/remote commitments diff",
×
2105
                                c.cfg.ChanPoint, htlc.RHash[:])
×
2106

×
2107
                        actionMap[HtlcFailDustAction] = append(
×
2108
                                actionMap[HtlcFailDustAction], htlc,
×
2109
                        )
×
2110

×
2111
                        continue
×
2112
                }
2113

2114
                log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
9✔
2115
                        "local/remote commitments diff",
9✔
2116
                        c.cfg.ChanPoint, htlc.RHash[:])
9✔
2117

9✔
2118
                actionMap[HtlcFailDanglingAction] = append(
9✔
2119
                        actionMap[HtlcFailDanglingAction], htlc,
9✔
2120
                )
9✔
2121
        }
2122

2123
        return actionMap
61✔
2124
}
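
// The following is an illustrative sketch added for exposition and is not
// part of the arbitrator: it isolates the set-difference step above,
// returning the HTLC indexes that appear on the remote commitment(s) but not
// on our local commitment. Those are the "dangling" HTLCs that may need to
// be failed back to the incoming link.
func exampleHtlcSetDiff(remote, local map[uint64]struct{}) []uint64 {
        var dangling []uint64
        for idx := range remote {
                if _, ok := local[idx]; ok {
                        continue
                }

                dangling = append(dangling, idx)
        }

        return dangling
}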
2125

2126
// checkRemoteChainActions examines the two possible remote commitment chains
2127
// and returns the set of chain actions we need to carry out if the remote
2128
// commitment (non pending) confirms. The pendingConf indicates if the pending
2129
// remote commitment confirmed. This is similar to checkCommitChainActions, but
2130
// we'll immediately fail any HTLCs on the pending remote commit, but not the
2131
// remote commit (or the other way around).
2132
func (c *ChannelArbitrator) checkRemoteChainActions(
2133
        height uint32, trigger transitionTrigger,
2134
        activeHTLCs map[HtlcSetKey]htlcSet,
2135
        pendingConf bool) (ChainActionMap, error) {
8✔
2136

8✔
2137
        // First, we'll examine all the normal chain actions on the remote
8✔
2138
        // commitment that confirmed.
8✔
2139
        confHTLCs := activeHTLCs[RemoteHtlcSet]
8✔
2140
        if pendingConf {
10✔
2141
                confHTLCs = activeHTLCs[RemotePendingHtlcSet]
2✔
2142
        }
2✔
2143
        remoteCommitActions, err := c.checkCommitChainActions(
8✔
2144
                height, trigger, confHTLCs,
8✔
2145
        )
8✔
2146
        if err != nil {
8✔
2147
                return nil, err
×
2148
        }
×
2149

2150
        // With these actions computed, we'll now check the diff of the HTLCs on
2151
        // the commitments, and cancel back any that are on the pending but not
2152
        // the non-pending.
2153
        remoteDiffActions := c.checkRemoteDiffActions(
8✔
2154
                activeHTLCs, pendingConf,
8✔
2155
        )
8✔
2156

8✔
2157
        // Finally, we'll merge all the chain actions and the final set of
8✔
2158
        // chain actions.
8✔
2159
        remoteCommitActions.Merge(remoteDiffActions)
8✔
2160
        return remoteCommitActions, nil
8✔
2161
}
2162

2163
// checkRemoteDiffActions checks the set difference of the HTLCs on the remote
2164
// confirmed commit and remote pending commit for HTLCS that we need to cancel
2165
// back. If we find any HTLCs on the remote pending but not the remote, then
2166
// we'll mark them to be failed immediately.
2167
func (c *ChannelArbitrator) checkRemoteDiffActions(
2168
        activeHTLCs map[HtlcSetKey]htlcSet,
2169
        pendingConf bool) ChainActionMap {
8✔
2170

8✔
2171
        // First, we'll partition the HTLCs into those that are present on the
8✔
2172
        // confirmed commitment, and those on the dangling commitment.
8✔
2173
        confHTLCs := activeHTLCs[RemoteHtlcSet]
8✔
2174
        danglingHTLCs := activeHTLCs[RemotePendingHtlcSet]
8✔
2175
        if pendingConf {
10✔
2176
                confHTLCs = activeHTLCs[RemotePendingHtlcSet]
2✔
2177
                danglingHTLCs = activeHTLCs[RemoteHtlcSet]
2✔
2178
        }
2✔
2179

2180
        // Next, we'll create a set of all the HTLCs on the confirmed commitment.
2181
        remoteHtlcs := make(map[uint64]struct{})
8✔
2182
        for _, htlc := range confHTLCs.outgoingHTLCs {
10✔
2183
                remoteHtlcs[htlc.HtlcIndex] = struct{}{}
2✔
2184
        }
2✔
2185

2186
        // With the remote HTLCs assembled, we'll mark any HTLCs only on the
2187
        // remote pending commitment to be failed asap.
2188
        actionMap := make(ChainActionMap)
8✔
2189
        for _, htlc := range danglingHTLCs.outgoingHTLCs {
12✔
2190
                if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok {
4✔
2191
                        continue
×
2192
                }
2193

2194
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
4✔
2195
                if err != nil {
4✔
2196
                        log.Errorf("ChannelArbitrator(%v): failed to query "+
×
2197
                                "preimage for dangling htlc=%x from remote "+
×
2198
                                "commitments diff", c.cfg.ChanPoint,
×
2199
                                htlc.RHash[:])
×
2200

×
2201
                        continue
×
2202
                }
2203

2204
                if preimageAvailable {
4✔
2205
                        continue
×
2206
                }
2207

2208
                // Dust HTLCs on the remote commitment can be failed back.
2209
                if htlc.OutputIndex < 0 {
4✔
2210
                        log.Infof("ChannelArbitrator(%v): fail dangling dust "+
×
2211
                                "htlc=%x from remote commitments diff",
×
2212
                                c.cfg.ChanPoint, htlc.RHash[:])
×
2213

×
2214
                        actionMap[HtlcFailDustAction] = append(
×
2215
                                actionMap[HtlcFailDustAction], htlc,
×
2216
                        )
×
2217

×
2218
                        continue
×
2219
                }
2220

2221
                actionMap[HtlcFailDanglingAction] = append(
4✔
2222
                        actionMap[HtlcFailDanglingAction], htlc,
4✔
2223
                )
4✔
2224

4✔
2225
                log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
4✔
2226
                        "remote commitments diff",
4✔
2227
                        c.cfg.ChanPoint, htlc.RHash[:])
4✔
2228
        }
2229

2230
        return actionMap
8✔
2231
}
2232

2233
// constructChainActions returns the set of actions that should be taken for
2234
// confirmed HTLCs at the specified height. Our actions will depend on the set
2235
// of HTLCs that were active across all channels at the time of channel
2236
// closure.
2237
func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
2238
        height uint32, trigger transitionTrigger) (ChainActionMap, error) {
21✔
2239

21✔
2240
        // If we've reached this point and have no confirmed commitment set,
21✔
2241
        // then this is an older node that had a pending channel close before
21✔
2242
        // the CommitSet was introduced. In this case, we'll just return the
21✔
2243
        // existing ChainActionMap they had on disk.
21✔
2244
        if confCommitSet == nil || confCommitSet.ConfCommitKey.IsNone() {
28✔
2245
                return c.log.FetchChainActions()
7✔
2246
        }
7✔
2247

2248
        // Otherwise, we have the full commitment set written to disk, and can
2249
        // proceed as normal.
2250
        htlcSets := confCommitSet.toActiveHTLCSets()
14✔
2251
        confCommitKey, err := confCommitSet.ConfCommitKey.UnwrapOrErr(
14✔
2252
                fmt.Errorf("no commitKey available"),
14✔
2253
        )
14✔
2254
        if err != nil {
14✔
2255
                return nil, err
×
2256
        }
×
2257

2258
        switch confCommitKey {
14✔
2259
        // If the local commitment transaction confirmed, then we'll examine
2260
        // that as well as their commitments to the set of chain actions.
2261
        case LocalHtlcSet:
6✔
2262
                return c.checkLocalChainActions(
6✔
2263
                        height, trigger, htlcSets, true,
6✔
2264
                )
6✔
2265

2266
        // If the remote commitment confirmed, then we'll grab all the chain
2267
        // actions for the remote commit, and check the pending commit for any
2268
        // HTLCS we need to handle immediately (dust).
2269
        case RemoteHtlcSet:
6✔
2270
                return c.checkRemoteChainActions(
6✔
2271
                        height, trigger, htlcSets, false,
6✔
2272
                )
6✔
2273

2274
        // Otherwise, the remote pending commitment confirmed, so we'll examine
2275
        // the HTLCs on that unrevoked dangling commitment.
2276
        case RemotePendingHtlcSet:
2✔
2277
                return c.checkRemoteChainActions(
2✔
2278
                        height, trigger, htlcSets, true,
2✔
2279
                )
2✔
2280
        }
2281

2282
        return nil, fmt.Errorf("unable to locate chain actions")
×
2283
}
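// Illustrative sketch, not part of lnd: the fn.Option handling used above.
// UnwrapOrErr yields the wrapped HtlcSetKey when ConfCommitKey is Some, and
// the supplied error when it is None, so a missing key becomes an ordinary
// error return rather than a nil dereference.
func unwrapConfCommitKey(key fn.Option[HtlcSetKey]) (HtlcSetKey, error) {
        return key.UnwrapOrErr(fmt.Errorf("no commitKey available"))
}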
2284

2285
// prepContractResolutions is called either in the case that we decide we need
2286
// to go to chain, or the remote party goes to chain. Given a set of actions we
2287
// need to take for each HTLC, this method will return a set of contract
2288
// resolvers that will resolve the contracts on-chain if needed, and also a set
2289
// of packets to send to the htlcswitch in order to ensure all incoming HTLC's
2290
// are properly resolved.
2291
func (c *ChannelArbitrator) prepContractResolutions(
2292
        contractResolutions *ContractResolutions, height uint32,
2293
        htlcActions ChainActionMap) ([]ContractResolver, error) {
12✔
2294

12✔
2295
        // We'll also fetch the historical state of this channel, as it should
12✔
2296
        // have been marked as closed by now, and supplement it to each resolver
12✔
2297
        // such that we can properly resolve our pending contracts.
12✔
2298
        var chanState *channeldb.OpenChannel
12✔
2299
        chanState, err := c.cfg.FetchHistoricalChannel()
12✔
2300
        switch {
12✔
2301
        // If we don't find this channel, then it may be the case that it
2302
        // was closed before we started to retain the final state
2303
        // information for open channels.
2304
        case err == channeldb.ErrNoHistoricalBucket:
×
2305
                fallthrough
×
2306
        case err == channeldb.ErrChannelNotFound:
×
2307
                log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
×
2308
                        "state", c.cfg.ChanPoint)
×
2309

2310
        case err != nil:
×
2311
                return nil, err
×
2312
        }
2313

2314
        incomingResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
12✔
2315
        outgoingResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
12✔
2316

12✔
2317
        // We'll use these two maps to quickly look up an active HTLC with its
12✔
2318
        // matching HTLC resolution.
12✔
2319
        outResolutionMap := make(map[wire.OutPoint]lnwallet.OutgoingHtlcResolution)
12✔
2320
        inResolutionMap := make(map[wire.OutPoint]lnwallet.IncomingHtlcResolution)
12✔
2321
        for i := 0; i < len(incomingResolutions); i++ {
12✔
2322
                inRes := incomingResolutions[i]
×
2323
                inResolutionMap[inRes.HtlcPoint()] = inRes
×
2324
        }
×
2325
        for i := 0; i < len(outgoingResolutions); i++ {
13✔
2326
                outRes := outgoingResolutions[i]
1✔
2327
                outResolutionMap[outRes.HtlcPoint()] = outRes
1✔
2328
        }
1✔
2329

2330
        // We'll create the resolver kit that we'll be cloning for each
2331
        // resolver so they each can do their duty.
2332
        resolverCfg := ResolverConfig{
12✔
2333
                ChannelArbitratorConfig: c.cfg,
12✔
2334
                Checkpoint: func(res ContractResolver,
12✔
2335
                        reports ...*channeldb.ResolverReport) error {
14✔
2336

2✔
2337
                        return c.log.InsertUnresolvedContracts(reports, res)
2✔
2338
                },
2✔
2339
        }
2340

2341
        commitHash := contractResolutions.CommitHash
12✔
2342

12✔
2343
        var htlcResolvers []ContractResolver
12✔
2344

12✔
2345
        // We instantiate an anchor resolver if the commitment tx has an
12✔
2346
        // anchor.
12✔
2347
        if contractResolutions.AnchorResolution != nil {
14✔
2348
                anchorResolver := newAnchorResolver(
2✔
2349
                        contractResolutions.AnchorResolution.AnchorSignDescriptor,
2✔
2350
                        contractResolutions.AnchorResolution.CommitAnchor,
2✔
2351
                        height, c.cfg.ChanPoint, resolverCfg,
2✔
2352
                )
2✔
2353
                anchorResolver.SupplementState(chanState)
2✔
2354

2✔
2355
                htlcResolvers = append(htlcResolvers, anchorResolver)
2✔
2356
        }
2✔
2357

2358
        // If this is a breach close, we'll create a breach resolver, determine
2359
        // the htlc's to fail back, and exit. This is done because the other
2360
        // steps taken for non-breach-closes do not matter for breach-closes.
2361
        if contractResolutions.BreachResolution != nil {
14✔
2362
                breachResolver := newBreachResolver(resolverCfg)
2✔
2363
                htlcResolvers = append(htlcResolvers, breachResolver)
2✔
2364

2✔
2365
                return htlcResolvers, nil
2✔
2366
        }
2✔
2367

2368
        // For each HTLC, we'll either act immediately, meaning we'll instantly
2369
        // fail the HTLC, or we'll act only once the transaction has been
2370
        // confirmed, in which case we'll need an HTLC resolver.
2371
        for htlcAction, htlcs := range htlcActions {
21✔
2372
                switch htlcAction {
11✔
2373
                // If we can claim this HTLC, we'll create an HTLC resolver to
2374
                // claim the HTLC (second-level or directly), then add the pre
2375
                case HtlcClaimAction:
×
2376
                        for _, htlc := range htlcs {
×
2377
                                htlc := htlc
×
2378

×
2379
                                htlcOp := wire.OutPoint{
×
2380
                                        Hash:  commitHash,
×
2381
                                        Index: uint32(htlc.OutputIndex),
×
2382
                                }
×
2383

×
2384
                                resolution, ok := inResolutionMap[htlcOp]
×
2385
                                if !ok {
×
2386
                                        // TODO(roasbeef): panic?
×
2387
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2388
                                                "incoming resolution: %v",
×
2389
                                                c.cfg.ChanPoint, htlcOp)
×
2390
                                        continue
×
2391
                                }
2392

2393
                                resolver := newSuccessResolver(
×
2394
                                        resolution, height, htlc, resolverCfg,
×
2395
                                )
×
2396
                                if chanState != nil {
×
2397
                                        resolver.SupplementState(chanState)
×
2398
                                }
×
2399
                                htlcResolvers = append(htlcResolvers, resolver)
×
2400
                        }
2401

2402
                // If we can timeout the HTLC directly, then we'll create the
2403
                // proper resolver to do so, who will then cancel the packet
2404
                // backwards.
2405
                case HtlcTimeoutAction:
×
2406
                        for _, htlc := range htlcs {
×
2407
                                htlc := htlc
×
2408

×
2409
                                htlcOp := wire.OutPoint{
×
2410
                                        Hash:  commitHash,
×
2411
                                        Index: uint32(htlc.OutputIndex),
×
2412
                                }
×
2413

×
2414
                                resolution, ok := outResolutionMap[htlcOp]
×
2415
                                if !ok {
×
2416
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2417
                                                "outgoing resolution: %v", c.cfg.ChanPoint, htlcOp)
×
2418
                                        continue
×
2419
                                }
2420

2421
                                resolver := newTimeoutResolver(
×
2422
                                        resolution, height, htlc, resolverCfg,
×
2423
                                )
×
2424
                                if chanState != nil {
×
2425
                                        resolver.SupplementState(chanState)
×
2426
                                }
×
2427

2428
                                // For outgoing HTLCs, we will also need to
2429
                                // supplement the resolver with the expiry
2430
                                // block height of its corresponding incoming
2431
                                // HTLC.
2432
                                deadline := c.cfg.FindOutgoingHTLCDeadline(htlc)
×
2433
                                resolver.SupplementDeadline(deadline)
×
2434

×
2435
                                htlcResolvers = append(htlcResolvers, resolver)
×
2436
                        }
2437

2438
                // If this is an incoming HTLC, but we can't act yet, then
2439
                // we'll create an incoming resolver to redeem the HTLC if we
2440
                // learn of the pre-image, or let the remote party time out.
2441
                case HtlcIncomingWatchAction:
×
2442
                        for _, htlc := range htlcs {
×
2443
                                htlc := htlc
×
2444

×
2445
                                htlcOp := wire.OutPoint{
×
2446
                                        Hash:  commitHash,
×
2447
                                        Index: uint32(htlc.OutputIndex),
×
2448
                                }
×
2449

×
2450
                                // TODO(roasbeef): need to handle incoming dust...
×
2451

×
2452
                                // TODO(roasbeef): can't be negative!!!
×
2453
                                resolution, ok := inResolutionMap[htlcOp]
×
2454
                                if !ok {
×
2455
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2456
                                                "incoming resolution: %v",
×
2457
                                                c.cfg.ChanPoint, htlcOp)
×
2458
                                        continue
×
2459
                                }
2460

2461
                                resolver := newIncomingContestResolver(
×
2462
                                        resolution, height, htlc,
×
2463
                                        resolverCfg,
×
2464
                                )
×
2465
                                if chanState != nil {
×
2466
                                        resolver.SupplementState(chanState)
×
2467
                                }
×
2468
                                htlcResolvers = append(htlcResolvers, resolver)
×
2469
                        }
2470

2471
                // Finally, if this is an outgoing HTLC we've sent, then we'll
2472
                // launch a resolver to watch for the pre-image (and settle
2473
                // backwards), or just timeout.
2474
                case HtlcOutgoingWatchAction:
1✔
2475
                        for _, htlc := range htlcs {
2✔
2476
                                htlc := htlc
1✔
2477

1✔
2478
                                htlcOp := wire.OutPoint{
1✔
2479
                                        Hash:  commitHash,
1✔
2480
                                        Index: uint32(htlc.OutputIndex),
1✔
2481
                                }
1✔
2482

1✔
2483
                                resolution, ok := outResolutionMap[htlcOp]
1✔
2484
                                if !ok {
1✔
2485
                                        log.Errorf("ChannelArbitrator(%v) "+
×
2486
                                                "unable to find outgoing "+
×
2487
                                                "resolution: %v",
×
2488
                                                c.cfg.ChanPoint, htlcOp)
×
2489

×
2490
                                        continue
×
2491
                                }
2492

2493
                                resolver := newOutgoingContestResolver(
1✔
2494
                                        resolution, height, htlc, resolverCfg,
1✔
2495
                                )
1✔
2496
                                if chanState != nil {
2✔
2497
                                        resolver.SupplementState(chanState)
1✔
2498
                                }
1✔
2499

2500
                                // For outgoing HTLCs, we will also need to
2501
                                // supplement the resolver with the expiry
2502
                                // block height of its corresponding incoming
2503
                                // HTLC.
2504
                                deadline := c.cfg.FindOutgoingHTLCDeadline(htlc)
1✔
2505
                                resolver.SupplementDeadline(deadline)
1✔
2506

1✔
2507
                                htlcResolvers = append(htlcResolvers, resolver)
1✔
2508
                        }
2509
                }
2510
        }
2511

2512
        // If this was a unilateral closure, then we'll also create a
2513
        // resolver to sweep our commitment output (but only if it wasn't
2514
        // trimmed).
2515
        if contractResolutions.CommitResolution != nil {
10✔
2516
                resolver := newCommitSweepResolver(
×
2517
                        *contractResolutions.CommitResolution, height,
×
2518
                        c.cfg.ChanPoint, resolverCfg,
×
2519
                )
×
2520
                if chanState != nil {
×
2521
                        resolver.SupplementState(chanState)
×
2522
                }
×
2523
                htlcResolvers = append(htlcResolvers, resolver)
×
2524
        }
2525

2526
        return htlcResolvers, nil
10✔
2527
}
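// Illustrative sketch, not part of lnd: how the resolution maps built above
// are keyed. Every HTLC on the confirmed commitment is identified by the
// outpoint it creates on that commitment transaction, so a lookup needs only
// the commitment txid and the HTLC's output index.
func lookupOutgoingRes(commitHash chainhash.Hash, htlc channeldb.HTLC,
        outResolutionMap map[wire.OutPoint]lnwallet.OutgoingHtlcResolution) (
        lnwallet.OutgoingHtlcResolution, bool) {

        htlcOp := wire.OutPoint{
                Hash:  commitHash,
                Index: uint32(htlc.OutputIndex),
        }

        res, ok := outResolutionMap[htlcOp]

        return res, ok
}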
2528

2529
// replaceResolver replaces a resolver in the list of active resolvers. If
2530
// the resolver to be replaced is not found, it returns an error.
2531
func (c *ChannelArbitrator) replaceResolver(oldResolver,
2532
        newResolver ContractResolver) error {
1✔
2533

1✔
2534
        c.activeResolversLock.Lock()
1✔
2535
        defer c.activeResolversLock.Unlock()
1✔
2536

1✔
2537
        oldKey := oldResolver.ResolverKey()
1✔
2538
        for i, r := range c.activeResolvers {
2✔
2539
                if bytes.Equal(r.ResolverKey(), oldKey) {
2✔
2540
                        c.activeResolvers[i] = newResolver
1✔
2541
                        return nil
1✔
2542
                }
1✔
2543
        }
2544

2545
        return errors.New("resolver to be replaced not found")
×
2546
}
2547

2548
// resolveContract is a goroutine tasked with fully resolving an unresolved
2549
// contract. Either the initial contract will be resolved after a single step,
2550
// or the contract will itself create another contract to be resolved. In
2551
// either case, once the contract has been fully resolved, we'll signal back to
2552
// the main goroutine so it can properly keep track of the set of unresolved
2553
// contracts.
2554
//
2555
// NOTE: This MUST be run as a goroutine.
2556
func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver,
2557
        immediate bool) {
6✔
2558

6✔
2559
        defer c.wg.Done()
6✔
2560

6✔
2561
        log.Debugf("ChannelArbitrator(%v): attempting to resolve %T",
6✔
2562
                c.cfg.ChanPoint, currentContract)
6✔
2563

6✔
2564
        // Until the contract is fully resolved, we'll continue to iteratively
6✔
2565
        // resolve the contract one step at a time.
6✔
2566
        for !currentContract.IsResolved() {
13✔
2567
                log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved",
7✔
2568
                        c.cfg.ChanPoint, currentContract)
7✔
2569

7✔
2570
                select {
7✔
2571

2572
                // If we've been signalled to quit, then we'll exit early.
2573
                case <-c.quit:
×
2574
                        return
×
2575

2576
                default:
7✔
2577
                        // Otherwise, we'll attempt to resolve the current
7✔
2578
                        // contract.
7✔
2579
                        nextContract, err := currentContract.Resolve(immediate)
7✔
2580
                        if err != nil {
8✔
2581
                                if err == errResolverShuttingDown {
1✔
2582
                                        return
×
2583
                                }
×
2584

2585
                                log.Errorf("ChannelArbitrator(%v): unable to "+
1✔
2586
                                        "progress %T: %v",
1✔
2587
                                        c.cfg.ChanPoint, currentContract, err)
1✔
2588
                                return
1✔
2589
                        }
2590

2591
                        switch {
6✔
2592
                        // If this contract produced another, then this means
2593
                        // the current contract was only able to be partially
2594
                        // resolved in this step. So we'll do a contract swap
2595
                        // within our logs: the new contract will take the
2596
                        // place of the old one.
2597
                        case nextContract != nil:
1✔
2598
                                log.Debugf("ChannelArbitrator(%v): swapping "+
1✔
2599
                                        "out contract %T for %T ",
1✔
2600
                                        c.cfg.ChanPoint, currentContract,
1✔
2601
                                        nextContract)
1✔
2602

1✔
2603
                                // Swap contract in log.
1✔
2604
                                err := c.log.SwapContract(
1✔
2605
                                        currentContract, nextContract,
1✔
2606
                                )
1✔
2607
                                if err != nil {
1✔
2608
                                        log.Errorf("unable to add recurse "+
×
2609
                                                "contract: %v", err)
×
2610
                                }
×
2611

2612
                                // Swap contract in resolvers list. This is to
2613
                                // make sure that reports are queried from the
2614
                                // new resolver.
2615
                                err = c.replaceResolver(
1✔
2616
                                        currentContract, nextContract,
1✔
2617
                                )
1✔
2618
                                if err != nil {
1✔
2619
                                        log.Errorf("unable to replace "+
×
2620
                                                "contract: %v", err)
×
2621
                                }
×
2622

2623
                                // As this contract produced another, we'll
2624
                                // re-assign, so we can continue our resolution
2625
                                // loop.
2626
                                currentContract = nextContract
1✔
2627

2628
                        // If this contract is actually fully resolved, then
2629
                        // we'll mark it as such within the database.
2630
                        case currentContract.IsResolved():
5✔
2631
                                log.Debugf("ChannelArbitrator(%v): marking "+
5✔
2632
                                        "contract %T fully resolved",
5✔
2633
                                        c.cfg.ChanPoint, currentContract)
5✔
2634

5✔
2635
                                err := c.log.ResolveContract(currentContract)
5✔
2636
                                if err != nil {
5✔
2637
                                        log.Errorf("unable to resolve contract: %v",
×
2638
                                                err)
×
2639
                                }
×
2640

2641
                                // Now that the contract has been resolved,
2642
                                // we'll signal to the main goroutine.
2643
                                select {
5✔
2644
                                case c.resolutionSignal <- struct{}{}:
4✔
2645
                                case <-c.quit:
1✔
2646
                                        return
1✔
2647
                                }
2648
                        }
2649

2650
                }
2651
        }
2652
}
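// Illustrative sketch, not part of lnd: the resolution loop above in
// miniature. A contract is stepped repeatedly; each step either completes
// it, or hands back a follow-up contract that takes its place. The
// resolveStep interface is a simplified stand-in for ContractResolver with
// the quit/checkpoint handling stripped out.
type resolveStep interface {
        Resolve() (resolveStep, error)
        IsResolved() bool
}

func resolveUntilDone(current resolveStep) error {
        for !current.IsResolved() {
                next, err := current.Resolve()
                if err != nil {
                        return err
                }

                // A non-nil follow-up contract replaces the current one, just
                // as SwapContract/replaceResolver do above.
                if next != nil {
                        current = next
                }
        }

        return nil
}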
2653

2654
// signalUpdateMsg is a struct that carries fresh signals to the
2655
// ChannelArbitrator. We need to receive a message like this each time the
2656
// channel becomes active, as its internal state may change.
2657
type signalUpdateMsg struct {
2658
        // newSignals is the set of new active signals to be sent to the
2659
        // arbitrator.
2660
        newSignals *ContractSignals
2661

2662
        // doneChan is a channel that will be closed once the arbitrator has
2663
        // attached the new signals.
2664
        doneChan chan struct{}
2665
}
2666

2667
// UpdateContractSignals updates the set of signals the ChannelArbitrator needs
2668
// to receive from a channel in real-time in order to keep in sync with the
2669
// latest state of the contract.
2670
func (c *ChannelArbitrator) UpdateContractSignals(newSignals *ContractSignals) {
11✔
2671
        done := make(chan struct{})
11✔
2672

11✔
2673
        select {
11✔
2674
        case c.signalUpdates <- &signalUpdateMsg{
2675
                newSignals: newSignals,
2676
                doneChan:   done,
2677
        }:
11✔
2678
        case <-c.quit:
×
2679
        }
2680

2681
        select {
11✔
2682
        case <-done:
11✔
2683
        case <-c.quit:
×
2684
        }
2685
}
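// Illustrative sketch, not part of lnd: the request/acknowledge handshake
// used by UpdateContractSignals above. The caller ships a message carrying a
// done channel and then waits for it to be closed, so it knows the new
// signals are in effect (or the arbitrator is quitting) before proceeding.
// The quit parameter stands in for the arbitrator's shutdown channel.
func sendSignalsAndWait(updates chan<- *signalUpdateMsg,
        newSignals *ContractSignals, quit <-chan struct{}) {

        done := make(chan struct{})

        select {
        case updates <- &signalUpdateMsg{
                newSignals: newSignals,
                doneChan:   done,
        }:
        case <-quit:
                return
        }

        select {
        case <-done:
        case <-quit:
        }
}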
2686

2687
// notifyContractUpdate updates the ChannelArbitrator's unmerged mappings such
2688
// that it can later be merged with activeHTLCs when calling
2689
// checkLocalChainActions or sweepAnchors. These are the only two places that
2690
// activeHTLCs is used.
2691
func (c *ChannelArbitrator) notifyContractUpdate(upd *ContractUpdate) {
12✔
2692
        c.unmergedMtx.Lock()
12✔
2693
        defer c.unmergedMtx.Unlock()
12✔
2694

12✔
2695
        // Update the mapping.
12✔
2696
        c.unmergedSet[upd.HtlcKey] = newHtlcSet(upd.Htlcs)
12✔
2697

12✔
2698
        log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v",
12✔
2699
                c.cfg.ChanPoint, lnutils.SpewLogClosure(upd))
12✔
2700
}
12✔
2701

2702
// updateActiveHTLCs merges the unmerged set of HTLCs from the link with
2703
// activeHTLCs.
2704
func (c *ChannelArbitrator) updateActiveHTLCs() {
74✔
2705
        c.unmergedMtx.RLock()
74✔
2706
        defer c.unmergedMtx.RUnlock()
74✔
2707

74✔
2708
        // Update the mapping.
74✔
2709
        c.activeHTLCs[LocalHtlcSet] = c.unmergedSet[LocalHtlcSet]
74✔
2710
        c.activeHTLCs[RemoteHtlcSet] = c.unmergedSet[RemoteHtlcSet]
74✔
2711

74✔
2712
        // If the pending set exists, update that as well.
74✔
2713
        if _, ok := c.unmergedSet[RemotePendingHtlcSet]; ok {
83✔
2714
                pendingSet := c.unmergedSet[RemotePendingHtlcSet]
9✔
2715
                c.activeHTLCs[RemotePendingHtlcSet] = pendingSet
9✔
2716
        }
9✔
2717
}
2718

2719
// channelAttendant is the primary goroutine that acts at the judicial
2720
// arbitrator between our channel state, the remote channel peer, and the
2721
// blockchain (Our judge). This goroutine will ensure that we faithfully execute
2722
// all clauses of our contract in the case that we need to go on-chain for a
2723
// dispute. Currently, two such conditions warrant our intervention: when an
2724
// outgoing HTLC is about to timeout, and when we know the pre-image for an
2725
// incoming HTLC, but it hasn't yet been settled off-chain. In these cases,
2726
// we'll: broadcast our commitment, cancel/settle any HTLC's backwards after
2727
// sufficient confirmation, and finally send our set of outputs to the UTXO
2728
// Nursery for incubation, and ultimate sweeping.
2729
//
2730
// NOTE: This MUST be run as a goroutine.
2731
//
2732
//nolint:funlen
2733
func (c *ChannelArbitrator) channelAttendant(bestHeight int32,
2734
        commitSet *CommitSet) {
48✔
2735

48✔
2736
        // TODO(roasbeef): tell top chain arb we're done
48✔
2737
        defer func() {
93✔
2738
                c.wg.Done()
45✔
2739
        }()
45✔
2740

2741
        err := c.progressStateMachineAfterRestart(bestHeight, commitSet)
48✔
2742
        if err != nil {
49✔
2743
                // In case of an error, we return early but we do not shut down
1✔
2744
                // LND, because there might be other channels that still can be
1✔
2745
                // resolved and we don't want to interfere with that.
1✔
2746
                // We continue to run the channel attendant in case the channel
1✔
2747
                // closes via other means, for example if the remote party force
1✔
2748
                // closes the channel. So we log the error and continue.
1✔
2749
                log.Errorf("Unable to progress state machine after "+
1✔
2750
                        "restart: %v", err)
1✔
2751
        }
1✔
2752

2753
        for {
154✔
2754
                select {
106✔
2755

2756
                // A new block has arrived, we'll examine all the active HTLC's
2757
                // to see if any of them have expired, and also update our
2758
                // track of the best current height.
2759
                case blockHeight, ok := <-c.blocks:
24✔
2760
                        if !ok {
33✔
2761
                                return
9✔
2762
                        }
9✔
2763
                        bestHeight = blockHeight
15✔
2764

15✔
2765
                        // If we're not in the default state, then we can
15✔
2766
                        // ignore this signal as we're waiting for contract
15✔
2767
                        // resolution.
15✔
2768
                        if c.state != StateDefault {
24✔
2769
                                continue
9✔
2770
                        }
2771

2772
                        // Now that a new block has arrived, we'll attempt to
2773
                        // advance our state forward.
2774
                        nextState, _, err := c.advanceState(
6✔
2775
                                uint32(bestHeight), chainTrigger, nil,
6✔
2776
                        )
6✔
2777
                        if err != nil {
6✔
2778
                                log.Errorf("Unable to advance state: %v", err)
×
2779
                        }
×
2780

2781
                        // If as a result of this trigger, the contract is
2782
                        // fully resolved, then we'll exit.
2783
                        if nextState == StateFullyResolved {
6✔
2784
                                return
×
2785
                        }
×
2786

2787
                // A new signal update was just sent. This indicates that the
2788
                // channel under watch is now live, and may modify its internal
2789
                // state, so we'll get the most up to date signals to we can
2790
                // properly do our job.
2791
                case signalUpdate := <-c.signalUpdates:
11✔
2792
                        log.Tracef("ChannelArbitrator(%v): got new signal "+
11✔
2793
                                "update!", c.cfg.ChanPoint)
11✔
2794

11✔
2795
                        // We'll update the ShortChannelID.
11✔
2796
                        c.cfg.ShortChanID = signalUpdate.newSignals.ShortChanID
11✔
2797

11✔
2798
                        // Now that the signal has been updated, we'll now
11✔
2799
                        // close the done channel to signal to the caller we've
11✔
2800
                        // registered the new ShortChannelID.
11✔
2801
                        close(signalUpdate.doneChan)
11✔
2802

2803
                // We've cooperatively closed the channel, so we're no longer
2804
                // needed. We'll mark the channel as resolved and exit.
2805
                case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
2✔
2806
                        log.Infof("ChannelArbitrator(%v) marking channel "+
2✔
2807
                                "cooperatively closed", c.cfg.ChanPoint)
2✔
2808

2✔
2809
                        err := c.cfg.MarkChannelClosed(
2✔
2810
                                closeInfo.ChannelCloseSummary,
2✔
2811
                                channeldb.ChanStatusCoopBroadcasted,
2✔
2812
                        )
2✔
2813
                        if err != nil {
2✔
2814
                                log.Errorf("Unable to mark channel closed: "+
×
2815
                                        "%v", err)
×
2816
                                return
×
2817
                        }
×
2818

2819
                        // We'll now advance our state machine until it reaches
2820
                        // a terminal state, and the channel is marked resolved.
2821
                        _, _, err = c.advanceState(
2✔
2822
                                closeInfo.CloseHeight, coopCloseTrigger, nil,
2✔
2823
                        )
2✔
2824
                        if err != nil {
3✔
2825
                                log.Errorf("Unable to advance state: %v", err)
1✔
2826
                                return
1✔
2827
                        }
1✔
2828

2829
                // We have broadcasted our commitment, and it is now confirmed
2830
                // on-chain.
2831
                case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
12✔
2832
                        log.Infof("ChannelArbitrator(%v): local on-chain "+
12✔
2833
                                "channel close", c.cfg.ChanPoint)
12✔
2834

12✔
2835
                        if c.state != StateCommitmentBroadcasted {
13✔
2836
                                log.Errorf("ChannelArbitrator(%v): unexpected "+
1✔
2837
                                        "local on-chain channel close",
1✔
2838
                                        c.cfg.ChanPoint)
1✔
2839
                        }
1✔
2840
                        closeTx := closeInfo.CloseTx
12✔
2841

12✔
2842
                        resolutions, err := closeInfo.ContractResolutions.
12✔
2843
                                UnwrapOrErr(
12✔
2844
                                        fmt.Errorf("resolutions not found"),
12✔
2845
                                )
12✔
2846
                        if err != nil {
12✔
2847
                                log.Errorf("ChannelArbitrator(%v): unable to "+
×
2848
                                        "get resolutions: %v", c.cfg.ChanPoint,
×
2849
                                        err)
×
2850

×
2851
                                return
×
2852
                        }
×
2853

2854
                        // We make sure that the htlc resolutions are present
2855
                        // otherwise we would panic dereferencing the pointer.
2856
                        //
2857
                        // TODO(ziggie): Refactor ContractResolutions to use
2858
                        // options.
2859
                        if resolutions.HtlcResolutions == nil {
12✔
2860
                                log.Errorf("ChannelArbitrator(%v): htlc "+
×
2861
                                        "resolutions not found",
×
2862
                                        c.cfg.ChanPoint)
×
2863

×
2864
                                return
×
2865
                        }
×
2866

2867
                        contractRes := &ContractResolutions{
12✔
2868
                                CommitHash:       closeTx.TxHash(),
12✔
2869
                                CommitResolution: resolutions.CommitResolution,
12✔
2870
                                HtlcResolutions:  *resolutions.HtlcResolutions,
12✔
2871
                                AnchorResolution: resolutions.AnchorResolution,
12✔
2872
                        }
12✔
2873

12✔
2874
                        // When processing a unilateral close event, we'll
12✔
2875
                        // transition to the ContractClosed state. We'll log
12✔
2876
                        // out the set of resolutions such that they are
12✔
2877
                        // available to fetch in that state, we'll also write
12✔
2878
                        // the commit set so we can reconstruct our chain
12✔
2879
                        // actions on restart.
12✔
2880
                        err = c.log.LogContractResolutions(contractRes)
12✔
2881
                        if err != nil {
12✔
2882
                                log.Errorf("Unable to write resolutions: %v",
×
2883
                                        err)
×
2884
                                return
×
2885
                        }
×
2886
                        err = c.log.InsertConfirmedCommitSet(
12✔
2887
                                &closeInfo.CommitSet,
12✔
2888
                        )
12✔
2889
                        if err != nil {
12✔
2890
                                log.Errorf("Unable to write commit set: %v",
×
2891
                                        err)
×
2892
                                return
×
2893
                        }
×
2894

2895
                        // After the set of resolutions are successfully
2896
                        // logged, we can safely close the channel. After this
2897
                        // succeeds we won't be getting chain events anymore,
2898
                        // so we must make sure we can recover on restart after
2899
                        // it is marked closed. If the next state transition
2900
                        // fails, we'll start up in the prior state again, and
2901
                        // we won't be longer getting chain events. In this
2902
                        // case we must manually re-trigger the state
2903
                        // transition into StateContractClosed based on the
2904
                        // close status of the channel.
2905
                        err = c.cfg.MarkChannelClosed(
12✔
2906
                                closeInfo.ChannelCloseSummary,
12✔
2907
                                channeldb.ChanStatusLocalCloseInitiator,
12✔
2908
                        )
12✔
2909
                        if err != nil {
12✔
2910
                                log.Errorf("Unable to mark "+
×
2911
                                        "channel closed: %v", err)
×
2912
                                return
×
2913
                        }
×
2914

2915
                        // We'll now advance our state machine until it reaches
2916
                        // a terminal state.
2917
                        _, _, err = c.advanceState(
12✔
2918
                                uint32(closeInfo.SpendingHeight),
12✔
2919
                                localCloseTrigger, &closeInfo.CommitSet,
12✔
2920
                        )
12✔
2921
                        if err != nil {
13✔
2922
                                log.Errorf("Unable to advance state: %v", err)
1✔
2923
                        }
1✔
2924

2925
                // The remote party has broadcast the commitment on-chain.
2926
                // We'll examine our state to determine if we need to act at
2927
                // all.
2928
                case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
8✔
2929
                        log.Infof("ChannelArbitrator(%v): remote party has "+
8✔
2930
                                "closed channel out on-chain", c.cfg.ChanPoint)
8✔
2931

8✔
2932
                        // If we don't have a self output, and there are no
8✔
2933
                        // active HTLC's, then we can immediately mark the
8✔
2934
                        // contract as fully resolved and exit.
8✔
2935
                        contractRes := &ContractResolutions{
8✔
2936
                                CommitHash:       *uniClosure.SpenderTxHash,
8✔
2937
                                CommitResolution: uniClosure.CommitResolution,
8✔
2938
                                HtlcResolutions:  *uniClosure.HtlcResolutions,
8✔
2939
                                AnchorResolution: uniClosure.AnchorResolution,
8✔
2940
                        }
8✔
2941

8✔
2942
                        // When processing a unilateral close event, we'll
8✔
2943
                        // transition to the ContractClosed state. We'll log
8✔
2944
                        // out the set of resolutions such that they are
8✔
2945
                        // available to fetch in that state, we'll also write
8✔
2946
                        // the commit set so we can reconstruct our chain
8✔
2947
                        // actions on restart.
8✔
2948
                        err := c.log.LogContractResolutions(contractRes)
8✔
2949
                        if err != nil {
9✔
2950
                                log.Errorf("Unable to write resolutions: %v",
1✔
2951
                                        err)
1✔
2952
                                return
1✔
2953
                        }
1✔
2954
                        err = c.log.InsertConfirmedCommitSet(
7✔
2955
                                &uniClosure.CommitSet,
7✔
2956
                        )
7✔
2957
                        if err != nil {
7✔
2958
                                log.Errorf("Unable to write commit set: %v",
×
2959
                                        err)
×
2960
                                return
×
2961
                        }
×
2962

2963
                        // After the set of resolutions are successfully
2964
                        // logged, we can safely close the channel. After this
2965
                        // succeeds we won't be getting chain events anymore,
2966
                        // so we must make sure we can recover on restart after
2967
                        // it is marked closed. If the next state transition
2968
                        // fails, we'll start up in the prior state again, and
2969
                        // we'll no longer be getting chain events. In this
2970
                        // case we must manually re-trigger the state
2971
                        // transition into StateContractClosed based on the
2972
                        // close status of the channel.
2973
                        closeSummary := &uniClosure.ChannelCloseSummary
7✔
2974
                        err = c.cfg.MarkChannelClosed(
7✔
2975
                                closeSummary,
7✔
2976
                                channeldb.ChanStatusRemoteCloseInitiator,
7✔
2977
                        )
7✔
2978
                        if err != nil {
8✔
2979
                                log.Errorf("Unable to mark channel closed: %v",
1✔
2980
                                        err)
1✔
2981
                                return
1✔
2982
                        }
1✔
2983

2984
                        // We'll now advance our state machine until it reaches
2985
                        // a terminal state.
2986
                        _, _, err = c.advanceState(
6✔
2987
                                uint32(uniClosure.SpendingHeight),
6✔
2988
                                remoteCloseTrigger, &uniClosure.CommitSet,
6✔
2989
                        )
6✔
2990
                        if err != nil {
8✔
2991
                                log.Errorf("Unable to advance state: %v", err)
2✔
2992
                        }
2✔
2993

2994
                // The remote has breached the channel. As this is handled by
2995
                // the ChainWatcher and BreachArbitrator, we don't have to do
2996
                // anything in particular, so just advance our state and
2997
                // gracefully exit.
2998
                case breachInfo := <-c.cfg.ChainEvents.ContractBreach:
1✔
2999
                        log.Infof("ChannelArbitrator(%v): remote party has "+
1✔
3000
                                "breached channel!", c.cfg.ChanPoint)
1✔
3001

1✔
3002
                        // In the breach case, we'll only have anchor and
1✔
3003
                        // breach resolutions.
1✔
3004
                        contractRes := &ContractResolutions{
1✔
3005
                                CommitHash:       breachInfo.CommitHash,
1✔
3006
                                BreachResolution: breachInfo.BreachResolution,
1✔
3007
                                AnchorResolution: breachInfo.AnchorResolution,
1✔
3008
                        }
1✔
3009

1✔
3010
                        // We'll transition to the ContractClosed state and log
1✔
3011
                        // the set of resolutions such that they can be turned
1✔
3012
                        // into resolvers later on. We'll also insert the
1✔
3013
                        // CommitSet of the latest set of commitments.
1✔
3014
                        err := c.log.LogContractResolutions(contractRes)
1✔
3015
                        if err != nil {
1✔
3016
                                log.Errorf("Unable to write resolutions: %v",
×
3017
                                        err)
×
3018
                                return
×
3019
                        }
×
3020
                        err = c.log.InsertConfirmedCommitSet(
1✔
3021
                                &breachInfo.CommitSet,
1✔
3022
                        )
1✔
3023
                        if err != nil {
1✔
3024
                                log.Errorf("Unable to write commit set: %v",
×
3025
                                        err)
×
3026
                                return
×
3027
                        }
×
3028

3029
                        // The channel is finally marked pending closed here as
3030
                        // the BreachArbitrator and channel arbitrator have
3031
                        // persisted the relevant states.
3032
                        closeSummary := &breachInfo.CloseSummary
1✔
3033
                        err = c.cfg.MarkChannelClosed(
1✔
3034
                                closeSummary,
1✔
3035
                                channeldb.ChanStatusRemoteCloseInitiator,
1✔
3036
                        )
1✔
3037
                        if err != nil {
1✔
3038
                                log.Errorf("Unable to mark channel closed: %v",
×
3039
                                        err)
×
3040
                                return
×
3041
                        }
×
3042

3043
                        log.Infof("Breached channel=%v marked pending-closed",
1✔
3044
                                breachInfo.BreachResolution.FundingOutPoint)
1✔
3045

1✔
3046
                        // We'll advance our state machine until it reaches a
1✔
3047
                        // terminal state.
1✔
3048
                        _, _, err = c.advanceState(
1✔
3049
                                uint32(bestHeight), breachCloseTrigger,
1✔
3050
                                &breachInfo.CommitSet,
1✔
3051
                        )
1✔
3052
                        if err != nil {
1✔
3053
                                log.Errorf("Unable to advance state: %v", err)
×
3054
                        }
×
3055

3056
                // A new contract has just been resolved, we'll now check our
3057
                // log to see if all contracts have been resolved. If so, then
3058
                // we can exit as the contract is fully resolved.
3059
                case <-c.resolutionSignal:
4✔
3060
                        log.Infof("ChannelArbitrator(%v): a contract has been "+
4✔
3061
                                "fully resolved!", c.cfg.ChanPoint)
4✔
3062

4✔
3063
                        nextState, _, err := c.advanceState(
4✔
3064
                                uint32(bestHeight), chainTrigger, nil,
4✔
3065
                        )
4✔
3066
                        if err != nil {
4✔
3067
                                log.Errorf("Unable to advance state: %v", err)
×
3068
                        }
×
3069

3070
                        // If we don't have anything further to do after
3071
                        // advancing our state, then we'll exit.
3072
                        if nextState == StateFullyResolved {
7✔
3073
                                log.Infof("ChannelArbitrator(%v): all "+
3✔
3074
                                        "contracts fully resolved, exiting",
3✔
3075
                                        c.cfg.ChanPoint)
3✔
3076

3✔
3077
                                return
3✔
3078
                        }
3✔
3079

3080
                // We've just received a request to forcibly close out the
3081
                // channel. We'll
3082
                case closeReq := <-c.forceCloseReqs:
11✔
3083
                        log.Infof("ChannelArbitrator(%v): received force "+
11✔
3084
                                "close request", c.cfg.ChanPoint)
11✔
3085

11✔
3086
                        if c.state != StateDefault {
12✔
3087
                                select {
1✔
3088
                                case closeReq.closeTx <- nil:
1✔
3089
                                case <-c.quit:
×
3090
                                }
3091

3092
                                select {
1✔
3093
                                case closeReq.errResp <- errAlreadyForceClosed:
1✔
3094
                                case <-c.quit:
×
3095
                                }
3096

3097
                                continue
1✔
3098
                        }
3099

3100
                        nextState, closeTx, err := c.advanceState(
10✔
3101
                                uint32(bestHeight), userTrigger, nil,
10✔
3102
                        )
10✔
3103
                        if err != nil {
11✔
3104
                                log.Errorf("Unable to advance state: %v", err)
1✔
3105
                        }
1✔
3106

3107
                        select {
10✔
3108
                        case closeReq.closeTx <- closeTx:
10✔
3109
                        case <-c.quit:
×
3110
                                return
×
3111
                        }
3112

3113
                        select {
10✔
3114
                        case closeReq.errResp <- err:
10✔
3115
                        case <-c.quit:
×
3116
                                return
×
3117
                        }
3118

3119
                        // If we don't have anything further to do after
3120
                        // advancing our state, then we'll exit.
3121
                        if nextState == StateFullyResolved {
10✔
3122
                                log.Infof("ChannelArbitrator(%v): all "+
×
3123
                                        "contracts resolved, exiting",
×
3124
                                        c.cfg.ChanPoint)
×
3125
                                return
×
3126
                        }
×
3127

3128
                case <-c.quit:
30✔
3129
                        return
30✔
3130
                }
3131
        }
3132
}
3133

3134
// checkLegacyBreach returns StateFullyResolved if the channel was closed with
3135
// a breach transaction before the channel arbitrator launched its own breach
3136
// resolver. StateContractClosed is returned if this is a modern breach close
3137
// with a breach resolver. StateError is returned if the log lookup failed.
3138
func (c *ChannelArbitrator) checkLegacyBreach() (ArbitratorState, error) {
2✔
3139
        // A previous version of the channel arbitrator would make the breach
2✔
3140
        // close skip to StateFullyResolved. If there are no contract
2✔
3141
        // resolutions in the bolt arbitrator log, then this is an older breach
2✔
3142
        // close. Otherwise, if there are resolutions, the state should advance
2✔
3143
        // to StateContractClosed.
2✔
3144
        _, err := c.log.FetchContractResolutions()
2✔
3145
        if err == errNoResolutions {
2✔
3146
                // This is an older breach close still in the database.
×
3147
                return StateFullyResolved, nil
×
3148
        } else if err != nil {
2✔
3149
                return StateError, err
×
3150
        }
×
3151

3152
        // This is a modern breach close with resolvers.
3153
        return StateContractClosed, nil
2✔
3154
}
3155

3156
// sweepRequest wraps the arguments used when calling `SweepInput`.
3157
type sweepRequest struct {
3158
        // input is the input to be swept.
3159
        input input.Input
3160

3161
        // params holds the sweeping parameters.
3162
        params sweep.Params
3163
}
3164

3165
// createSweepRequest creates an anchor sweeping request for a particular
3166
// version (local/remote/remote pending) of the commitment.
3167
func (c *ChannelArbitrator) createSweepRequest(
3168
        anchor *lnwallet.AnchorResolution, htlcs htlcSet, anchorPath string,
3169
        heightHint uint32) (sweepRequest, error) {
8✔
3170

8✔
3171
        // Use the chan id as the exclusive group. This prevents any of the
8✔
3172
        // anchors from being batched together.
8✔
3173
        exclusiveGroup := c.cfg.ShortChanID.ToUint64()
8✔
3174

8✔
3175
        // Find the deadline for this specific anchor.
8✔
3176
        deadline, value, err := c.findCommitmentDeadlineAndValue(
8✔
3177
                heightHint, htlcs,
8✔
3178
        )
8✔
3179
        if err != nil {
8✔
3180
                return sweepRequest{}, err
×
3181
        }
×
3182

3183
        // If we cannot find a deadline, it means there's no HTLCs at stake,
3184
        // which means we can relax our anchor sweeping conditions as we don't
3185
        // have any time sensitive outputs to sweep. However we need to
3186
        // register the anchor output with the sweeper so we are later able to
3187
        // bump the close fee.
3188
        if deadline.IsNone() {
11✔
3189
                log.Infof("ChannelArbitrator(%v): no HTLCs at stake, "+
3✔
3190
                        "sweeping anchor with default deadline",
3✔
3191
                        c.cfg.ChanPoint)
3✔
3192
        }
3✔
3193

3194
        witnessType := input.CommitmentAnchor
8✔
3195

8✔
3196
        // For taproot channels, we need to use the proper witness type.
8✔
3197
        if txscript.IsPayToTaproot(
8✔
3198
                anchor.AnchorSignDescriptor.Output.PkScript,
8✔
3199
        ) {
8✔
3200

×
3201
                witnessType = input.TaprootAnchorSweepSpend
×
3202
        }
×
3203

3204
        // Prepare anchor output for sweeping.
3205
        anchorInput := input.MakeBaseInput(
8✔
3206
                &anchor.CommitAnchor,
8✔
3207
                witnessType,
8✔
3208
                &anchor.AnchorSignDescriptor,
8✔
3209
                heightHint,
8✔
3210
                &input.TxInfo{
8✔
3211
                        Fee:    anchor.CommitFee,
8✔
3212
                        Weight: anchor.CommitWeight,
8✔
3213
                },
8✔
3214
        )
8✔
3215

8✔
3216
        // If we have a deadline, we'll use it to calculate the deadline
8✔
3217
        // height, otherwise default to none.
8✔
3218
        deadlineDesc := "None"
8✔
3219
        deadlineHeight := fn.MapOption(func(d int32) int32 {
13✔
3220
                deadlineDesc = fmt.Sprintf("%d", d)
5✔
3221

5✔
3222
                return d + int32(heightHint)
5✔
3223
        })(deadline)
5✔
3224

3225
        // Calculate the budget based on the value under protection, which is
3226
        // the sum of all HTLCs on this commitment subtracted by their budgets.
3227
        // The anchor output in itself has a small output value of 330 sats so
3228
        // we also include it in the budget to pay for the cpfp transaction.
3229
        budget := calculateBudget(
8✔
3230
                value, c.cfg.Budget.AnchorCPFPRatio, c.cfg.Budget.AnchorCPFP,
8✔
3231
        ) + AnchorOutputValue
8✔
3232

8✔
3233
        log.Infof("ChannelArbitrator(%v): offering anchor from %s commitment "+
8✔
3234
                "%v to sweeper with deadline=%v, budget=%v", c.cfg.ChanPoint,
8✔
3235
                anchorPath, anchor.CommitAnchor, deadlineDesc, budget)
8✔
3236

8✔
3237
        // Sweep anchor output with a confirmation target fee preference.
8✔
3238
        // Because this is a cpfp-operation, the anchor will only be attempted
8✔
3239
        // to sweep when the current fee estimate for the confirmation target
8✔
3240
        // exceeds the commit fee rate.
8✔
3241
        return sweepRequest{
8✔
3242
                input: &anchorInput,
8✔
3243
                params: sweep.Params{
8✔
3244
                        ExclusiveGroup: &exclusiveGroup,
8✔
3245
                        Budget:         budget,
8✔
3246
                        DeadlineHeight: deadlineHeight,
8✔
3247
                },
8✔
3248
        }, nil
8✔
3249
}
3250

3251
// prepareAnchorSweeps creates a list of requests to be used by the sweeper for
3252
// all possible commitment versions.
3253
func (c *ChannelArbitrator) prepareAnchorSweeps(heightHint uint32,
3254
        anchors *lnwallet.AnchorResolutions) ([]sweepRequest, error) {
19✔
3255

19✔
3256
        // requests holds all the possible anchor sweep requests. We can have
19✔
3257
        // up to 3 different versions of commitments (local/remote/remote
19✔
3258
        // dangling) to be CPFPed by the anchors.
19✔
3259
        requests := make([]sweepRequest, 0, 3)
19✔
3260

19✔
3261
        // remotePendingReq holds the request for sweeping the anchor output on
19✔
3262
        // the remote pending commitment. It's only set when there's an actual
19✔
3263
        // pending remote commitment and it's used to decide whether we need to
19✔
3264
        // update the fee budget when sweeping the anchor output on the local
19✔
3265
        // commitment.
19✔
3266
        remotePendingReq := fn.None[sweepRequest]()
19✔
3267

19✔
3268
        // First we check on the remote pending commitment and optionally
19✔
3269
        // create an anchor sweeping request.
19✔
3270
        htlcs, ok := c.activeHTLCs[RemotePendingHtlcSet]
19✔
3271
        if ok && anchors.RemotePending != nil {
21✔
3272
                req, err := c.createSweepRequest(
2✔
3273
                        anchors.RemotePending, htlcs, "remote pending",
2✔
3274
                        heightHint,
2✔
3275
                )
2✔
3276
                if err != nil {
2✔
3277
                        return nil, err
×
3278
                }
×
3279

3280
                // Save the request.
3281
                requests = append(requests, req)
2✔
3282

2✔
3283
                // Set the optional variable.
2✔
3284
                remotePendingReq = fn.Some(req)
2✔
3285
        }
3286

3287
        // Check the local commitment and optionally create an anchor sweeping
3288
        // request. The params used in this request will be influenced by the
3289
        // anchor sweeping request made from the pending remote commitment.
3290
        htlcs, ok = c.activeHTLCs[LocalHtlcSet]
19✔
3291
        if ok && anchors.Local != nil {
22✔
3292
                req, err := c.createSweepRequest(
3✔
3293
                        anchors.Local, htlcs, "local", heightHint,
3✔
3294
                )
3✔
3295
                if err != nil {
3✔
3296
                        return nil, err
×
3297
                }
×
3298

3299
                // If there's an anchor sweeping request from the pending
3300
                // remote commitment, we will compare its budget against the
3301
                // budget used here and choose the params that has a larger
3302
                // budget. The deadline when choosing the remote pending budget
3303
                // instead of the local one will always be earlier or equal to
3304
                // the local deadline because outgoing HTLCs are resolved on
3305
                // the local commitment first before they are removed from the
3306
                // remote one.
3307
                remotePendingReq.WhenSome(func(s sweepRequest) {
5✔
3308
                        if s.params.Budget <= req.params.Budget {
3✔
3309
                                return
1✔
3310
                        }
1✔
3311

3312
                        log.Infof("ChannelArbitrator(%v): replaced local "+
1✔
3313
                                "anchor(%v) sweep params with pending remote "+
1✔
3314
                                "anchor sweep params, \nold:[%v], \nnew:[%v]",
1✔
3315
                                c.cfg.ChanPoint, anchors.Local.CommitAnchor,
1✔
3316
                                req.params, s.params)
1✔
3317

1✔
3318
                        req.params = s.params
1✔
3319
                })
3320

3321
                // Save the request.
3322
                requests = append(requests, req)
3✔
3323
        }
3324

3325
        // Check the remote commitment and create an anchor sweeping request if
3326
        // needed.
3327
        htlcs, ok = c.activeHTLCs[RemoteHtlcSet]
19✔
3328
        if ok && anchors.Remote != nil {
22✔
3329
                req, err := c.createSweepRequest(
3✔
3330
                        anchors.Remote, htlcs, "remote", heightHint,
3✔
3331
                )
3✔
3332
                if err != nil {
3✔
3333
                        return nil, err
×
3334
                }
×
3335

3336
                requests = append(requests, req)
3✔
3337
        }
3338

3339
        return requests, nil
19✔
3340
}
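
// The following is an illustrative sketch (not part of lnd) of the budget
// selection rule documented in prepareAnchorSweeps above: when anchor sweep
// requests exist for both the pending remote commitment and the local
// commitment, the local request keeps its own params unless the pending
// remote request carries a strictly larger budget, in which case the local
// request adopts the remote params. The helper name pickAnchorSweepParams is
// hypothetical.
func pickAnchorSweepParams(local sweepRequest,
        remotePending fn.Option[sweepRequest]) sweepRequest {

        // Only replace the local params when the pending remote request has a
        // strictly larger budget. Its deadline is always earlier than or equal
        // to the local one, so adopting it never delays the sweep.
        remotePending.WhenSome(func(s sweepRequest) {
                if s.params.Budget > local.params.Budget {
                        local.params = s.params
                }
        })

        return local
}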

// failIncomingDust resolves the incoming dust HTLCs because they do not have
// an output on the commitment transaction and cannot be resolved onchain. We
// mark them as failed here.
func (c *ChannelArbitrator) failIncomingDust(
        incomingDustHTLCs []channeldb.HTLC) error {

        for _, htlc := range incomingDustHTLCs {
                if !htlc.Incoming || htlc.OutputIndex >= 0 {
                        return fmt.Errorf("htlc with index %v is not incoming "+
                                "dust", htlc.OutputIndex)
                }

                key := models.CircuitKey{
                        ChanID: c.cfg.ShortChanID,
                        HtlcID: htlc.HtlcIndex,
                }

                // Mark this dust htlc as final failed.
                chainArbCfg := c.cfg.ChainArbitratorConfig
                err := chainArbCfg.PutFinalHtlcOutcome(
                        key.ChanID, key.HtlcID, false,
                )
                if err != nil {
                        return err
                }

                // Send notification.
                chainArbCfg.HtlcNotifier.NotifyFinalHtlcEvent(
                        key,
                        channeldb.FinalHtlcInfo{
                                Settled:  false,
                                Offchain: false,
                        },
                )
        }

        return nil
}
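
// isIncomingDust is an illustrative sketch (not part of lnd; the helper name
// is hypothetical) of the check performed in failIncomingDust above: an HTLC
// only qualifies as incoming dust when it is incoming and has no output on
// the commitment transaction, which is signalled by a negative output index.
func isIncomingDust(htlc channeldb.HTLC) bool {
        return htlc.Incoming && htlc.OutputIndex < 0
}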

// abandonForwards cancels back the incoming HTLCs for their corresponding
// outgoing HTLCs. We use a set here to avoid sending duplicate failure
// messages for the same HTLC. This also needs to be done for locally
// initiated outgoing HTLCs because they are special cased in the switch.
func (c *ChannelArbitrator) abandonForwards(htlcs fn.Set[uint64]) error {
        log.Debugf("ChannelArbitrator(%v): cancelling back %v incoming "+
                "HTLC(s)", c.cfg.ChanPoint, len(htlcs))

        msgsToSend := make([]ResolutionMsg, 0, len(htlcs))
        failureMsg := &lnwire.FailPermanentChannelFailure{}

        for idx := range htlcs {
                failMsg := ResolutionMsg{
                        SourceChan: c.cfg.ShortChanID,
                        HtlcIndex:  idx,
                        Failure:    failureMsg,
                }

                msgsToSend = append(msgsToSend, failMsg)
        }

        // Send the messages to the switch, if there are any.
        if len(msgsToSend) == 0 {
                return nil
        }

        log.Debugf("ChannelArbitrator(%v): sending resolution message=%v",
                c.cfg.ChanPoint, lnutils.SpewLogClosure(msgsToSend))

        err := c.cfg.DeliverResolutionMsg(msgsToSend...)
        if err != nil {
                log.Errorf("Unable to send resolution messages to switch: %v",
                        err)

                return err
        }

        return nil
}
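
// exampleAbandonForwards is an illustrative sketch (not part of lnd) of how a
// caller might use abandonForwards: gather the indices of outgoing HTLCs that
// can no longer be resolved into a set, so failures for the same HTLC are
// deduplicated, then cancel back their incoming counterparts. The function
// name and the filtering of the htlcs slice are assumptions made for the
// example.
func exampleAbandonForwards(c *ChannelArbitrator,
        htlcs []channeldb.HTLC) error {

        // Collect the outgoing HTLC indices into a set.
        indices := fn.NewSet[uint64]()
        for _, htlc := range htlcs {
                if htlc.Incoming {
                        continue
                }

                indices.Add(htlc.HtlcIndex)
        }

        // Cancel back the incoming HTLCs for these outgoing HTLCs.
        return c.abandonForwards(indices)
}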