lightningnetwork / lnd / 12058234999

27 Nov 2024 09:06PM UTC coverage: 57.847% (-1.1%) from 58.921%

Pull Request #9148: DynComms [2/n]: lnwire: add authenticated wire messages for Dyn*
Pull #9148 (ProofOfKeags): lnwire: convert DynPropose and DynCommit to use typed tlv records

142 of 177 new or added lines in 4 files covered. (80.23%)
19365 existing lines in 251 files now uncovered.
100876 of 174383 relevant lines covered (57.85%)
25338.28 hits per line

Source File: /contractcourt/channel_arbitrator.go (74.96% covered)
1
package contractcourt
2

3
import (
4
        "bytes"
5
        "context"
6
        "errors"
7
        "fmt"
8
        "math"
9
        "sync"
10
        "sync/atomic"
11
        "time"
12

13
        "github.com/btcsuite/btcd/btcutil"
14
        "github.com/btcsuite/btcd/chaincfg/chainhash"
15
        "github.com/btcsuite/btcd/txscript"
16
        "github.com/btcsuite/btcd/wire"
17
        "github.com/lightningnetwork/lnd/channeldb"
18
        "github.com/lightningnetwork/lnd/channeldb/models"
19
        "github.com/lightningnetwork/lnd/fn"
20
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
21
        "github.com/lightningnetwork/lnd/input"
22
        "github.com/lightningnetwork/lnd/invoices"
23
        "github.com/lightningnetwork/lnd/kvdb"
24
        "github.com/lightningnetwork/lnd/labels"
25
        "github.com/lightningnetwork/lnd/lntypes"
26
        "github.com/lightningnetwork/lnd/lnutils"
27
        "github.com/lightningnetwork/lnd/lnwallet"
28
        "github.com/lightningnetwork/lnd/lnwire"
29
        "github.com/lightningnetwork/lnd/sweep"
30
)
31

32
var (
33
        // errAlreadyForceClosed is an error returned when we attempt to force
34
        // close a channel that's already in the process of doing so.
35
        errAlreadyForceClosed = errors.New("channel is already in the " +
36
                "process of being force closed")
37
)
38

39
const (
40
        // arbitratorBlockBufferSize is the size of the buffer we give to each
41
        // channel arbitrator.
42
        arbitratorBlockBufferSize = 20
43

44
        // AnchorOutputValue is the output value for the anchor output of an
45
        // anchor channel.
46
        // See BOLT 03 for more details:
47
        // https://github.com/lightning/bolts/blob/master/03-transactions.md
48
        AnchorOutputValue = btcutil.Amount(330)
49
)
50

51
// WitnessSubscription represents an intent to be notified once new witnesses
52
// are discovered by various active contract resolvers. A contract resolver may
53
// use this to be notified of when it can satisfy an incoming contract after we
54
// discover the witness for an outgoing contract.
55
type WitnessSubscription struct {
56
        // WitnessUpdates is a channel that newly discovered witnesses will be
57
        // sent over.
58
        //
59
        // TODO(roasbeef): couple with WitnessType?
60
        WitnessUpdates <-chan lntypes.Preimage
61

62
        // CancelSubscription is a function closure that should be used by a
63
        // client to cancel the subscription once they are no longer interested
64
        // in receiving new updates.
65
        CancelSubscription func()
66
}
67

68
// WitnessBeacon is a global beacon of witnesses. Contract resolvers will use
69
// this interface to lookup witnesses (preimages typically) of contracts
70
// they're trying to resolve, add new preimages they resolve, and finally
71
// receive new updates each time a new preimage is discovered.
72
//
73
// TODO(roasbeef): need to delete the pre-images once we've used them
74
// and have been sufficiently confirmed?
75
type WitnessBeacon interface {
76
        // SubscribeUpdates returns a channel that will be sent over *each* time
77
        // a new preimage is discovered.
78
        SubscribeUpdates(chanID lnwire.ShortChannelID, htlc *channeldb.HTLC,
79
                payload *hop.Payload,
80
                nextHopOnionBlob []byte) (*WitnessSubscription, error)
81

82
        // LookupPreimage attempts to look up a preimage in the global cache.
83
        // True is returned for the second argument if the preimage is found.
84
        LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool)
85

86
        // AddPreimages adds a batch of newly discovered preimages to the global
87
        // cache, and also signals any subscribers of the newly discovered
88
        // witness.
89
        AddPreimages(preimages ...lntypes.Preimage) error
90
}
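// The helper below is a minimal usage sketch (illustrative only, not part of
// the lnd codebase) showing how a resolver might consume a WitnessBeacon:
// check the preimage cache first, then block on a subscription until the
// preimage for payHash arrives. The name waitForPreimage and the nil
// payload/onion-blob arguments are assumptions made purely for illustration.
func waitForPreimage(beacon WitnessBeacon, chanID lnwire.ShortChannelID,
        htlc *channeldb.HTLC, payHash lntypes.Hash,
        quit <-chan struct{}) (lntypes.Preimage, error) {

        // Fast path: the preimage may already be in the global cache.
        if preimage, ok := beacon.LookupPreimage(payHash); ok {
                return preimage, nil
        }

        // Otherwise, subscribe to newly discovered witnesses and wait.
        sub, err := beacon.SubscribeUpdates(chanID, htlc, nil, nil)
        if err != nil {
                return lntypes.Preimage{}, err
        }
        defer sub.CancelSubscription()

        for {
                select {
                // Only accept a preimage that matches the hash we care about.
                case preimage := <-sub.WitnessUpdates:
                        if preimage.Hash() == payHash {
                                return preimage, nil
                        }

                case <-quit:
                        return lntypes.Preimage{}, errors.New("shutting down")
                }
        }
}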
91

92
// ArbChannel is an abstraction that allows the channel arbitrator to interact
93
// with an open channel.
94
type ArbChannel interface {
95
        // ForceCloseChan should force close the contract that this attendant
96
        // is watching over. We'll use this when we decide that we need to go
97
        // to chain. It should in addition tell the switch to remove the
98
        // corresponding link, such that we won't accept any new updates. The
99
        // returned transaction is the commitment transaction that, once
100
        // confirmed, allows us to eventually resolve all outputs on chain.
101
        ForceCloseChan() (*wire.MsgTx, error)
102

103
        // NewAnchorResolutions returns the anchor resolutions for currently
104
        // valid commitment transactions.
105
        NewAnchorResolutions() (*lnwallet.AnchorResolutions, error)
106
}
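// The type below is a minimal sketch (illustrative only, not part of the lnd
// codebase) of an ArbChannel implementation, e.g. for a test harness: it
// hands back a canned force-close transaction and reports no anchors. The
// name stubArbChannel is an assumption made purely for illustration.
type stubArbChannel struct {
        closeTx *wire.MsgTx
}

// ForceCloseChan returns the canned commitment transaction.
func (s *stubArbChannel) ForceCloseChan() (*wire.MsgTx, error) {
        return s.closeTx, nil
}

// NewAnchorResolutions reports that there are no anchor outputs to resolve.
func (s *stubArbChannel) NewAnchorResolutions() (*lnwallet.AnchorResolutions,
        error) {

        return &lnwallet.AnchorResolutions{}, nil
}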
107

108
// ChannelArbitratorConfig contains all the functionality that the
109
// ChannelArbitrator needs in order to properly arbitrate any contract dispute
110
// on chain.
111
type ChannelArbitratorConfig struct {
112
        // ChanPoint is the channel point that uniquely identifies this
113
        // channel.
114
        ChanPoint wire.OutPoint
115

116
        // Channel is the full channel data structure. For legacy channels, this
117
        // field may not always be set after a restart.
118
        Channel ArbChannel
119

120
        // ShortChanID describes the exact location of the channel within the
121
        // chain. We'll use this to address any messages that we need to send
122
        // to the switch during contract resolution.
123
        ShortChanID lnwire.ShortChannelID
124

125
        // ChainEvents is an active subscription to the chain watcher for this
126
        // channel to be notified of any on-chain activity related to this
127
        // channel.
128
        ChainEvents *ChainEventSubscription
129

130
        // MarkCommitmentBroadcasted should mark the channel's commitment as
131
        // broadcast, meaning we are now waiting for it to confirm.
132
        MarkCommitmentBroadcasted func(*wire.MsgTx, lntypes.ChannelParty) error
133

134
        // MarkChannelClosed marks the channel closed in the database, with the
135
        // passed close summary. After this method successfully returns we can
136
        // no longer expect to receive chain events for this channel, and must
137
        // be able to recover from a failure without getting the close event
138
        // again. It takes an optional channel status which will update the
139
        // channel status in the record that we keep of historical channels.
140
        MarkChannelClosed func(*channeldb.ChannelCloseSummary,
141
                ...channeldb.ChannelStatus) error
142

143
        // IsPendingClose is a boolean indicating whether the channel is marked
144
        // as pending close in the database.
145
        IsPendingClose bool
146

147
        // ClosingHeight is the height at which the channel was closed. Note
148
        // that this value is only valid if IsPendingClose is true.
149
        ClosingHeight uint32
150

151
        // CloseType is the type of the close event in case IsPendingClose is
152
        // true. Otherwise this value is unset.
153
        CloseType channeldb.ClosureType
154

155
        // MarkChannelResolved is a function closure that serves to mark a
156
        // channel as "fully resolved". A channel itself can be considered
157
        // fully resolved once all active contracts have individually been
158
        // fully resolved.
159
        //
160
        // TODO(roasbeef): need RPC's to combine for pendingchannels RPC
161
        MarkChannelResolved func() error
162

163
        // PutResolverReport records a resolver report for the channel. If the
164
        // transaction provided is nil, the function should write the report
165
        // in a new transaction.
166
        PutResolverReport func(tx kvdb.RwTx,
167
                report *channeldb.ResolverReport) error
168

169
        // FetchHistoricalChannel retrieves the historical state of a channel.
170
        // This is mostly used to supplement the ContractResolvers with
171
        // additional information required for proper contract resolution.
172
        FetchHistoricalChannel func() (*channeldb.OpenChannel, error)
173

174
        // FindOutgoingHTLCDeadline returns the deadline in absolute block
175
        // height for the specified outgoing HTLC. For an outgoing HTLC, its
176
        // deadline is defined by the timeout height of its corresponding
177
        // incoming HTLC - this is the expiry height at which the remote peer can
178
        // spend his/her outgoing HTLC via the timeout path.
179
        FindOutgoingHTLCDeadline func(htlc channeldb.HTLC) fn.Option[int32]
180

181
        ChainArbitratorConfig
182
}
183

184
// ReportOutputType describes the type of output that is being reported
185
// on.
186
type ReportOutputType uint8
187

188
const (
189
        // ReportOutputIncomingHtlc is an incoming hash time locked contract on
190
        // the commitment tx.
191
        ReportOutputIncomingHtlc ReportOutputType = iota
192

193
        // ReportOutputOutgoingHtlc is an outgoing hash time locked contract on
194
        // the commitment tx.
195
        ReportOutputOutgoingHtlc
196

197
        // ReportOutputUnencumbered is an uncontested output on the commitment
198
        // transaction paying to us directly.
199
        ReportOutputUnencumbered
200

201
        // ReportOutputAnchor is an anchor output on the commitment tx.
202
        ReportOutputAnchor
203
)
204

205
// ContractReport provides a summary of a commitment tx output.
206
type ContractReport struct {
207
        // Outpoint is the final output that will be swept back to the wallet.
208
        Outpoint wire.OutPoint
209

210
        // Type indicates the type of the reported output.
211
        Type ReportOutputType
212

213
        // Amount is the final value that will be swept back to the wallet.
214
        Amount btcutil.Amount
215

216
        // MaturityHeight is the absolute block height that this output will
217
        // mature at.
218
        MaturityHeight uint32
219

220
        // Stage indicates whether the htlc is in the CLTV-timeout stage (1) or
221
        // the CSV-delay stage (2). A stage 1 htlc's maturity height will be set
222
        // to its expiry height, while a stage 2 htlc's maturity height will be
223
        // set to its confirmation height plus the maturity requirement.
224
        Stage uint32
225

226
        // LimboBalance is the total number of frozen coins within this
227
        // contract.
228
        LimboBalance btcutil.Amount
229

230
        // RecoveredBalance is the total value that has been successfully swept
231
        // back to the user's wallet.
232
        RecoveredBalance btcutil.Amount
233
}
234

235
// resolverReport creates a resolver report using some of the information in the
236
// contract report.
237
func (c *ContractReport) resolverReport(spendTx *chainhash.Hash,
238
        resolverType channeldb.ResolverType,
239
        outcome channeldb.ResolverOutcome) *channeldb.ResolverReport {
10✔
240

10✔
241
        return &channeldb.ResolverReport{
10✔
242
                OutPoint:        c.Outpoint,
10✔
243
                Amount:          c.Amount,
10✔
244
                ResolverType:    resolverType,
10✔
245
                ResolverOutcome: outcome,
10✔
246
                SpendTxID:       spendTx,
10✔
247
        }
10✔
248
}
10✔
249

250
// htlcSet represents the set of active HTLCs on a given commitment
251
// transaction.
252
type htlcSet struct {
253
        // incomingHTLCs is a map of all incoming HTLCs on the target
254
        // commitment transaction. We may potentially go onchain to claim the
255
        // funds sent to us within this set.
256
        incomingHTLCs map[uint64]channeldb.HTLC
257

258
        // outgoingHTLCs is a map of all outgoing HTLCs on the target
259
        // commitment transaction. We may potentially go onchain to reclaim the
260
        // funds that are currently in limbo.
261
        outgoingHTLCs map[uint64]channeldb.HTLC
262
}
263

264
// newHtlcSet constructs a new HTLC set from a slice of HTLC's.
265
func newHtlcSet(htlcs []channeldb.HTLC) htlcSet {
46✔
266
        outHTLCs := make(map[uint64]channeldb.HTLC)
46✔
267
        inHTLCs := make(map[uint64]channeldb.HTLC)
46✔
268
        for _, htlc := range htlcs {
77✔
269
                if htlc.Incoming {
37✔
270
                        inHTLCs[htlc.HtlcIndex] = htlc
6✔
271
                        continue
6✔
272
                }
273

274
                outHTLCs[htlc.HtlcIndex] = htlc
25✔
275
        }
276

277
        return htlcSet{
46✔
278
                incomingHTLCs: inHTLCs,
46✔
279
                outgoingHTLCs: outHTLCs,
46✔
280
        }
46✔
281
}
282

283
// HtlcSetKey is a two-tuple that uniquely identifies a set of HTLCs on a
284
// commitment transaction.
285
type HtlcSetKey struct {
286
        // IsRemote denotes if the HTLCs are on the remote commitment
287
        // transaction.
288
        IsRemote bool
289

290
        // IsPending denotes if the commitment transaction that the HTLCs are
291
        // on is pending (the higher of two unrevoked commitments).
292
        IsPending bool
293
}
294

295
var (
296
        // LocalHtlcSet is the HtlcSetKey used for local commitments.
297
        LocalHtlcSet = HtlcSetKey{IsRemote: false, IsPending: false}
298

299
        // RemoteHtlcSet is the HtlcSetKey used for remote commitments.
300
        RemoteHtlcSet = HtlcSetKey{IsRemote: true, IsPending: false}
301

302
        // RemotePendingHtlcSet is the HtlcSetKey used for dangling remote
303
        // commitment transactions.
304
        RemotePendingHtlcSet = HtlcSetKey{IsRemote: true, IsPending: true}
305
)
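// newActiveHTLCSets is a minimal sketch (illustrative only, not part of the
// lnd codebase) showing how the well-known keys above index per-commitment
// HTLC sets, mirroring how a ChannelArbitrator's activeHTLCs map is seeded.
// The function name and parameters are assumptions made for illustration.
func newActiveHTLCSets(localHTLCs,
        remoteHTLCs []channeldb.HTLC) map[HtlcSetKey]htlcSet {

        return map[HtlcSetKey]htlcSet{
                LocalHtlcSet:  newHtlcSet(localHTLCs),
                RemoteHtlcSet: newHtlcSet(remoteHTLCs),
        }
}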
306

307
// String returns a human readable string describing the target HtlcSetKey.
308
func (h HtlcSetKey) String() string {
8✔
309
        switch h {
8✔
310
        case LocalHtlcSet:
4✔
311
                return "LocalHtlcSet"
4✔
312
        case RemoteHtlcSet:
2✔
313
                return "RemoteHtlcSet"
2✔
314
        case RemotePendingHtlcSet:
2✔
315
                return "RemotePendingHtlcSet"
2✔
316
        default:
×
317
                return "unknown HtlcSetKey"
×
318
        }
319
}
320

321
// ChannelArbitrator is the on-chain arbitrator for a particular channel. The
322
// struct will keep in sync with the current set of HTLCs on the commitment
323
// transaction. The job of the attendant is to go on-chain to either settle or
324
// cancel an HTLC as necessary iff: an HTLC times out, or we know the
325
// pre-image to an HTLC, but it wasn't settled by the link off-chain. The
326
// ChannelArbitrator will factor in an expected confirmation delta when
327
// broadcasting to ensure that we avoid any possibility of race conditions, and
328
// sweep the output(s) without contest.
329
type ChannelArbitrator struct {
330
        started int32 // To be used atomically.
331
        stopped int32 // To be used atomically.
332

333
        // startTimestamp is the time when this ChannelArbitrator was started.
334
        startTimestamp time.Time
335

336
        // log is a persistent log that the attendant will use to checkpoint
337
        // its next action, and the state of any unresolved contracts.
338
        log ArbitratorLog
339

340
        // activeHTLCs is the set of active incoming/outgoing HTLC's on all
341
        // currently valid commitment transactions.
342
        activeHTLCs map[HtlcSetKey]htlcSet
343

344
        // unmergedSet is used to update the activeHTLCs map in two callsites:
345
        // checkLocalChainActions and sweepAnchors. It contains the latest
346
        // updates from the link. It is never deleted from; its entries may be
347
        // replaced on subsequent calls to notifyContractUpdate.
348
        unmergedSet map[HtlcSetKey]htlcSet
349
        unmergedMtx sync.RWMutex
350

351
        // cfg contains all the functionality that the ChannelArbitrator requires
352
        // to do its duty.
353
        cfg ChannelArbitratorConfig
354

355
        // blocks is a channel that the arbitrator will receive new blocks on.
356
        // This channel should be buffered so that it does not block the
357
        // sender.
358
        blocks chan int32
359

360
        // signalUpdates is a channel over which any new live signals for the
361
        // channel we're watching will be sent.
362
        signalUpdates chan *signalUpdateMsg
363

364
        // activeResolvers is a slice of any active resolvers. This is used to
365
        // be able to signal them for shutdown in case we shut down.
366
        activeResolvers []ContractResolver
367

368
        // activeResolversLock prevents simultaneous read and write to the
369
        // resolvers slice.
370
        activeResolversLock sync.RWMutex
371

372
        // resolutionSignal is a channel that will be sent upon by contract
373
        // resolvers once their contract has been fully resolved. With each
374
        // send, we'll check to see if the contract is fully resolved.
375
        resolutionSignal chan struct{}
376

377
        // forceCloseReqs is a channel over which requests to forcibly close
378
        // the contract will be sent.
379
        forceCloseReqs chan *forceCloseReq
380

381
        // state is the current state of the arbitrator. This state is examined
382
        // upon start up to decide which actions to take.
383
        state ArbitratorState
384

385
        wg   sync.WaitGroup
386
        quit chan struct{}
387
}
388

389
// NewChannelArbitrator returns a new instance of a ChannelArbitrator backed by
390
// the passed config struct.
391
func NewChannelArbitrator(cfg ChannelArbitratorConfig,
392
        htlcSets map[HtlcSetKey]htlcSet, log ArbitratorLog) *ChannelArbitrator {
51✔
393

51✔
394
        // Create a new map for unmerged HTLC's as we will overwrite the values
51✔
395
        // and want to avoid modifying activeHTLCs directly. This soft copying
51✔
396
        // is done to ensure that activeHTLCs isn't reset as an empty map later
51✔
397
        // on.
51✔
398
        unmerged := make(map[HtlcSetKey]htlcSet)
51✔
399
        unmerged[LocalHtlcSet] = htlcSets[LocalHtlcSet]
51✔
400
        unmerged[RemoteHtlcSet] = htlcSets[RemoteHtlcSet]
51✔
401

51✔
402
        // If the pending set exists, write that as well.
51✔
403
        if _, ok := htlcSets[RemotePendingHtlcSet]; ok {
51✔
404
                unmerged[RemotePendingHtlcSet] = htlcSets[RemotePendingHtlcSet]
×
405
        }
×
406

407
        return &ChannelArbitrator{
51✔
408
                log:              log,
51✔
409
                blocks:           make(chan int32, arbitratorBlockBufferSize),
51✔
410
                signalUpdates:    make(chan *signalUpdateMsg),
51✔
411
                resolutionSignal: make(chan struct{}),
51✔
412
                forceCloseReqs:   make(chan *forceCloseReq),
51✔
413
                activeHTLCs:      htlcSets,
51✔
414
                unmergedSet:      unmerged,
51✔
415
                cfg:              cfg,
51✔
416
                quit:             make(chan struct{}),
51✔
417
        }
51✔
418
}
419

420
// chanArbStartState contains the information from disk that we need to start
421
// up a channel arbitrator.
422
type chanArbStartState struct {
423
        currentState ArbitratorState
424
        commitSet    *CommitSet
425
}
426

427
// getStartState retrieves the information from disk that our channel arbitrator
428
// requires to start.
429
func (c *ChannelArbitrator) getStartState(tx kvdb.RTx) (*chanArbStartState,
430
        error) {
48✔
431

48✔
432
        // First, we'll read our last state from disk, so our internal state
48✔
433
        // machine can act accordingly.
48✔
434
        state, err := c.log.CurrentState(tx)
48✔
435
        if err != nil {
48✔
436
                return nil, err
×
437
        }
×
438

439
        // Next we'll fetch our confirmed commitment set. This will only exist
440
        // if the channel has been closed out on chain for modern nodes. For
441
        // older nodes, this won't be found at all, and will rely on the
442
        // existing written chain actions. Additionally, if this channel hasn't
443
        // logged any actions in the log, then this field won't be present.
444
        commitSet, err := c.log.FetchConfirmedCommitSet(tx)
48✔
445
        if err != nil && err != errNoCommitSet && err != errScopeBucketNoExist {
48✔
446
                return nil, err
×
447
        }
×
448

449
        return &chanArbStartState{
48✔
450
                currentState: state,
48✔
451
                commitSet:    commitSet,
48✔
452
        }, nil
48✔
453
}
454

455
// Start starts all the goroutines that the ChannelArbitrator needs to operate.
456
// It takes a start state, which will be looked up on disk if it is not
457
// provided.
458
func (c *ChannelArbitrator) Start(state *chanArbStartState) error {
48✔
459
        if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
48✔
460
                return nil
×
461
        }
×
462
        c.startTimestamp = c.cfg.Clock.Now()
48✔
463

48✔
464
        // If the state passed in is nil, we look it up now.
48✔
465
        if state == nil {
85✔
466
                var err error
37✔
467
                state, err = c.getStartState(nil)
37✔
468
                if err != nil {
37✔
469
                        return err
×
470
                }
×
471
        }
472

473
        log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
48✔
474
                c.cfg.ChanPoint, lnutils.SpewLogClosure(c.activeHTLCs),
48✔
475
                state.currentState)
48✔
476

48✔
477
        // Set our state from our starting state.
48✔
478
        c.state = state.currentState
48✔
479

48✔
480
        _, bestHeight, err := c.cfg.ChainIO.GetBestBlock()
48✔
481
        if err != nil {
48✔
482
                return err
×
483
        }
×
484

485
        // If the channel has been marked pending close in the database, and we
486
        // haven't transitioned the state machine to StateContractClosed (or a
487
        // succeeding state), then a state transition most likely failed. We'll
488
        // try to recover from this by manually advancing the state by setting
489
        // the corresponding close trigger.
490
        trigger := chainTrigger
48✔
491
        triggerHeight := uint32(bestHeight)
48✔
492
        if c.cfg.IsPendingClose {
53✔
493
                switch c.state {
5✔
494
                case StateDefault:
4✔
495
                        fallthrough
4✔
496
                case StateBroadcastCommit:
5✔
497
                        fallthrough
5✔
498
                case StateCommitmentBroadcasted:
5✔
499
                        switch c.cfg.CloseType {
5✔
500

501
                        case channeldb.CooperativeClose:
1✔
502
                                trigger = coopCloseTrigger
1✔
503

504
                        case channeldb.BreachClose:
1✔
505
                                trigger = breachCloseTrigger
1✔
506

507
                        case channeldb.LocalForceClose:
1✔
508
                                trigger = localCloseTrigger
1✔
509

510
                        case channeldb.RemoteForceClose:
2✔
511
                                trigger = remoteCloseTrigger
2✔
512
                        }
513

514
                        log.Warnf("ChannelArbitrator(%v): detected stalled "+
5✔
515
                                "state=%v for closed channel",
5✔
516
                                c.cfg.ChanPoint, c.state)
5✔
517
                }
518

519
                triggerHeight = c.cfg.ClosingHeight
5✔
520
        }
521

522
        log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+
48✔
523
                "triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger,
48✔
524
                triggerHeight)
48✔
525

48✔
526
        // We'll now attempt to advance our state forward based on the current
48✔
527
        // on-chain state, and our set of active contracts.
48✔
528
        startingState := c.state
48✔
529
        nextState, _, err := c.advanceState(
48✔
530
                triggerHeight, trigger, state.commitSet,
48✔
531
        )
48✔
532
        if err != nil {
50✔
533
                switch err {
2✔
534

535
                // If we detect that we tried to fetch resolutions, but failed,
536
                // this channel was marked closed in the database before
537
                // resolutions successfully written. In this case there is not
538
                // much we can do, so we don't return the error.
539
                case errScopeBucketNoExist:
×
540
                        fallthrough
×
541
                case errNoResolutions:
1✔
542
                        log.Warnf("ChannelArbitrator(%v): detected closed"+
1✔
543
                                "channel with no contract resolutions written.",
1✔
544
                                c.cfg.ChanPoint)
1✔
545

546
                default:
1✔
547
                        return err
1✔
548
                }
549
        }
550

551
        // If we start and ended at the awaiting full resolution state, then
552
        // we'll relaunch our set of unresolved contracts.
553
        if startingState == StateWaitingFullResolution &&
47✔
554
                nextState == StateWaitingFullResolution {
48✔
555

1✔
556
                // In order to relaunch the resolvers, we'll need to fetch the
1✔
557
                // set of HTLCs that were present in the commitment transaction
1✔
558
                // at the time it was confirmed. commitSet.ConfCommitKey can't
1✔
559
                // be nil at this point since we're in
1✔
560
                // StateWaitingFullResolution. We can only be in
1✔
561
                // StateWaitingFullResolution after we've transitioned from
1✔
562
                // StateContractClosed which can only be triggered by the local
1✔
563
                // or remote close trigger. This trigger is only fired when we
1✔
564
                // receive a chain event from the chain watcher that the
1✔
565
                // commitment has been confirmed on chain, and before we
1✔
566
                // advance our state step, we call InsertConfirmedCommitSet.
1✔
567
                err := c.relaunchResolvers(state.commitSet, triggerHeight)
1✔
568
                if err != nil {
1✔
569
                        return err
×
570
                }
×
571
        }
572

573
        c.wg.Add(1)
47✔
574
        go c.channelAttendant(bestHeight)
47✔
575
        return nil
47✔
576
}
577

578
// maybeAugmentTaprootResolvers will update the contract resolution information
579
// for taproot channels. This ensures that all the resolvers have the latest
580
// resolution, which may also include data such as the control block and tap
581
// tweaks.
582
func maybeAugmentTaprootResolvers(chanType channeldb.ChannelType,
583
        resolver ContractResolver,
584
        contractResolutions *ContractResolutions) {
1✔
585

1✔
586
        if !chanType.IsTaproot() {
2✔
587
                return
1✔
588
        }
1✔
589

590
        // The on disk resolutions contains all the ctrl block
591
        // information, so we'll set that now for the relevant
592
        // resolvers.
UNCOV
593
        switch r := resolver.(type) {
×
UNCOV
594
        case *commitSweepResolver:
×
UNCOV
595
                if contractResolutions.CommitResolution != nil {
×
UNCOV
596
                        //nolint:lll
×
UNCOV
597
                        r.commitResolution = *contractResolutions.CommitResolution
×
UNCOV
598
                }
×
UNCOV
599
        case *htlcOutgoingContestResolver:
×
UNCOV
600
                //nolint:lll
×
UNCOV
601
                htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
×
UNCOV
602
                for _, htlcRes := range htlcResolutions {
×
UNCOV
603
                        htlcRes := htlcRes
×
UNCOV
604

×
UNCOV
605
                        if r.htlcResolution.ClaimOutpoint ==
×
UNCOV
606
                                htlcRes.ClaimOutpoint {
×
UNCOV
607

×
UNCOV
608
                                r.htlcResolution = htlcRes
×
UNCOV
609
                        }
×
610
                }
611

UNCOV
612
        case *htlcTimeoutResolver:
×
UNCOV
613
                //nolint:lll
×
UNCOV
614
                htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
×
UNCOV
615
                for _, htlcRes := range htlcResolutions {
×
UNCOV
616
                        htlcRes := htlcRes
×
UNCOV
617

×
UNCOV
618
                        if r.htlcResolution.ClaimOutpoint ==
×
UNCOV
619
                                htlcRes.ClaimOutpoint {
×
UNCOV
620

×
UNCOV
621
                                r.htlcResolution = htlcRes
×
UNCOV
622
                        }
×
623
                }
624

UNCOV
625
        case *htlcIncomingContestResolver:
×
UNCOV
626
                //nolint:lll
×
UNCOV
627
                htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
×
UNCOV
628
                for _, htlcRes := range htlcResolutions {
×
UNCOV
629
                        htlcRes := htlcRes
×
UNCOV
630

×
UNCOV
631
                        if r.htlcResolution.ClaimOutpoint ==
×
UNCOV
632
                                htlcRes.ClaimOutpoint {
×
UNCOV
633

×
UNCOV
634
                                r.htlcResolution = htlcRes
×
UNCOV
635
                        }
×
636
                }
637
        case *htlcSuccessResolver:
×
638
                //nolint:lll
×
639
                htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
×
640
                for _, htlcRes := range htlcResolutions {
×
641
                        htlcRes := htlcRes
×
642

×
643
                        if r.htlcResolution.ClaimOutpoint ==
×
644
                                htlcRes.ClaimOutpoint {
×
645

×
646
                                r.htlcResolution = htlcRes
×
647
                        }
×
648
                }
649
        }
650
}
651

652
// relaunchResolvers relaunches the set of resolvers for unresolved contracts in
653
// order to provide them with information that's not immediately available upon
654
// starting the ChannelArbitrator. This information should ideally be stored in
655
// the database, so this only serves as an intermediate work-around to prevent a
656
// migration.
657
func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
658
        heightHint uint32) error {
1✔
659

1✔
660
        // We'll now query our log to see if there are any active unresolved
1✔
661
        // contracts. If this is the case, then we'll relaunch all contract
1✔
662
        // resolvers.
1✔
663
        unresolvedContracts, err := c.log.FetchUnresolvedContracts()
1✔
664
        if err != nil {
1✔
665
                return err
×
666
        }
×
667

668
        // Retrieve the commitment tx hash from the log.
669
        contractResolutions, err := c.log.FetchContractResolutions()
1✔
670
        if err != nil {
1✔
671
                log.Errorf("unable to fetch contract resolutions: %v",
×
672
                        err)
×
673
                return err
×
674
        }
×
675
        commitHash := contractResolutions.CommitHash
1✔
676

1✔
677
        // In prior versions of lnd, the information needed to supplement the
1✔
678
        // resolvers (in most cases, the full amount of the HTLC) was found in
1✔
679
        // the chain action map, which is now deprecated.  As a result, if the
1✔
680
        // commitSet is nil (an older node with unresolved HTLCs at time of
1✔
681
        // upgrade), then we'll use the chain action information in place. The
1✔
682
        // chain actions may exclude some information, but we cannot recover it
1✔
683
        // for these older nodes at the moment.
1✔
684
        var confirmedHTLCs []channeldb.HTLC
1✔
685
        if commitSet != nil && commitSet.ConfCommitKey.IsSome() {
2✔
686
                confCommitKey, err := commitSet.ConfCommitKey.UnwrapOrErr(
1✔
687
                        fmt.Errorf("no commitKey available"),
1✔
688
                )
1✔
689
                if err != nil {
1✔
690
                        return err
×
691
                }
×
692
                confirmedHTLCs = commitSet.HtlcSets[confCommitKey]
1✔
693
        } else {
×
694
                chainActions, err := c.log.FetchChainActions()
×
695
                if err != nil {
×
696
                        log.Errorf("unable to fetch chain actions: %v", err)
×
697
                        return err
×
698
                }
×
699
                for _, htlcs := range chainActions {
×
700
                        confirmedHTLCs = append(confirmedHTLCs, htlcs...)
×
701
                }
×
702
        }
703

704
        // Reconstruct the htlc outpoints and data from the chain action log.
705
        // The purpose of the constructed htlc map is to supplement the
706
        // resolvers restored from the database with extra data. Ideally this data
707
        // is stored as part of the resolver in the log. This is a workaround
708
        // to prevent a db migration. We use all available htlc sets here in
709
        // order to ensure we have complete coverage.
710
        htlcMap := make(map[wire.OutPoint]*channeldb.HTLC)
1✔
711
        for _, htlc := range confirmedHTLCs {
4✔
712
                htlc := htlc
3✔
713
                outpoint := wire.OutPoint{
3✔
714
                        Hash:  commitHash,
3✔
715
                        Index: uint32(htlc.OutputIndex),
3✔
716
                }
3✔
717
                htlcMap[outpoint] = &htlc
3✔
718
        }
3✔
719

720
        // We'll also fetch the historical state of this channel, as it should
721
        // have been marked as closed by now, and supplement it to each resolver
722
        // such that we can properly resolve our pending contracts.
723
        var chanState *channeldb.OpenChannel
1✔
724
        chanState, err = c.cfg.FetchHistoricalChannel()
1✔
725
        switch {
1✔
726
        // If we don't find this channel, then it may be the case that it
727
        // was closed before we started to retain the final state
728
        // information for open channels.
729
        case err == channeldb.ErrNoHistoricalBucket:
×
730
                fallthrough
×
731
        case err == channeldb.ErrChannelNotFound:
×
732
                log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
×
733
                        "state", c.cfg.ChanPoint)
×
734

735
        case err != nil:
×
736
                return err
×
737
        }
738

739
        log.Infof("ChannelArbitrator(%v): relaunching %v contract "+
1✔
740
                "resolvers", c.cfg.ChanPoint, len(unresolvedContracts))
1✔
741

1✔
742
        for i := range unresolvedContracts {
2✔
743
                resolver := unresolvedContracts[i]
1✔
744

1✔
745
                if chanState != nil {
2✔
746
                        resolver.SupplementState(chanState)
1✔
747

1✔
748
                        // For taproot channels, we'll need to also make sure
1✔
749
                        // the control block information was set properly.
1✔
750
                        maybeAugmentTaprootResolvers(
1✔
751
                                chanState.ChanType, resolver,
1✔
752
                                contractResolutions,
1✔
753
                        )
1✔
754
                }
1✔
755

756
                unresolvedContracts[i] = resolver
1✔
757

1✔
758
                htlcResolver, ok := resolver.(htlcContractResolver)
1✔
759
                if !ok {
1✔
UNCOV
760
                        continue
×
761
                }
762

763
                htlcPoint := htlcResolver.HtlcPoint()
1✔
764
                htlc, ok := htlcMap[htlcPoint]
1✔
765
                if !ok {
1✔
766
                        return fmt.Errorf(
×
767
                                "htlc resolver %T unavailable", resolver,
×
768
                        )
×
769
                }
×
770

771
                htlcResolver.Supplement(*htlc)
1✔
772

1✔
773
                // If this is an outgoing HTLC, we will also need to supplement
1✔
774
                // the resolver with the expiry block height of its
1✔
775
                // corresponding incoming HTLC.
1✔
776
                if !htlc.Incoming {
2✔
777
                        deadline := c.cfg.FindOutgoingHTLCDeadline(*htlc)
1✔
778
                        htlcResolver.SupplementDeadline(deadline)
1✔
779
                }
1✔
780
        }
781

782
        // The anchor resolver is stateless and can always be re-instantiated.
783
        if contractResolutions.AnchorResolution != nil {
1✔
UNCOV
784
                anchorResolver := newAnchorResolver(
×
UNCOV
785
                        contractResolutions.AnchorResolution.AnchorSignDescriptor,
×
UNCOV
786
                        contractResolutions.AnchorResolution.CommitAnchor,
×
UNCOV
787
                        heightHint, c.cfg.ChanPoint,
×
UNCOV
788
                        ResolverConfig{
×
UNCOV
789
                                ChannelArbitratorConfig: c.cfg,
×
UNCOV
790
                        },
×
UNCOV
791
                )
×
UNCOV
792

×
UNCOV
793
                anchorResolver.SupplementState(chanState)
×
UNCOV
794

×
UNCOV
795
                unresolvedContracts = append(unresolvedContracts, anchorResolver)
×
UNCOV
796

×
UNCOV
797
                // TODO(roasbeef): this isn't re-launched?
×
UNCOV
798
        }
×
799

800
        c.launchResolvers(unresolvedContracts, true)
1✔
801

1✔
802
        return nil
1✔
803
}
804

805
// Report returns htlc reports for the active resolvers.
UNCOV
806
func (c *ChannelArbitrator) Report() []*ContractReport {
×
UNCOV
807
        c.activeResolversLock.RLock()
×
UNCOV
808
        defer c.activeResolversLock.RUnlock()
×
UNCOV
809

×
UNCOV
810
        var reports []*ContractReport
×
UNCOV
811
        for _, resolver := range c.activeResolvers {
×
UNCOV
812
                r, ok := resolver.(reportingContractResolver)
×
UNCOV
813
                if !ok {
×
814
                        continue
×
815
                }
816

UNCOV
817
                report := r.report()
×
UNCOV
818
                if report == nil {
×
UNCOV
819
                        continue
×
820
                }
821

UNCOV
822
                reports = append(reports, report)
×
823
        }
824

UNCOV
825
        return reports
×
826
}
827

828
// Stop signals the ChannelArbitrator for a graceful shutdown.
829
func (c *ChannelArbitrator) Stop() error {
50✔
830
        if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
56✔
831
                return nil
6✔
832
        }
6✔
833

834
        log.Debugf("Stopping ChannelArbitrator(%v)", c.cfg.ChanPoint)
44✔
835

44✔
836
        if c.cfg.ChainEvents.Cancel != nil {
55✔
837
                go c.cfg.ChainEvents.Cancel()
11✔
838
        }
11✔
839

840
        c.activeResolversLock.RLock()
44✔
841
        for _, activeResolver := range c.activeResolvers {
50✔
842
                activeResolver.Stop()
6✔
843
        }
6✔
844
        c.activeResolversLock.RUnlock()
44✔
845

44✔
846
        close(c.quit)
44✔
847
        c.wg.Wait()
44✔
848

44✔
849
        return nil
44✔
850
}
851

852
// transitionTrigger is an enum that denotes exactly *why* a state transition
853
// was initiated. This is useful as depending on the initial trigger, we may
854
// skip certain states as those actions are expected to have already taken
855
// place as a result of the external trigger.
856
type transitionTrigger uint8
857

858
const (
859
        // chainTrigger is a transition trigger that has been attempted due to
860
        // changing on-chain conditions, such as a new block being attached
861
        // which times out HTLC's.
862
        chainTrigger transitionTrigger = iota
863

864
        // userTrigger is a transition trigger driven by user action. Examples
865
        // of such a trigger include a user requesting a force closure of the
866
        // channel.
867
        userTrigger
868

869
        // remoteCloseTrigger is a transition trigger driven by the remote
870
        // peer's commitment being confirmed.
871
        remoteCloseTrigger
872

873
        // localCloseTrigger is a transition trigger driven by our commitment
874
        // being confirmed.
875
        localCloseTrigger
876

877
        // coopCloseTrigger is a transition trigger driven by a cooperative
878
        // close transaction being confirmed.
879
        coopCloseTrigger
880

881
        // breachCloseTrigger is a transition trigger driven by a remote breach
882
        // being confirmed. In this case the channel arbitrator will wait for
883
        // the BreachArbitrator to finish and then clean up gracefully.
884
        breachCloseTrigger
885
)
886

887
// String returns a human readable string describing the passed
888
// transitionTrigger.
UNCOV
889
func (t transitionTrigger) String() string {
×
UNCOV
890
        switch t {
×
UNCOV
891
        case chainTrigger:
×
UNCOV
892
                return "chainTrigger"
×
893

UNCOV
894
        case remoteCloseTrigger:
×
UNCOV
895
                return "remoteCloseTrigger"
×
896

UNCOV
897
        case userTrigger:
×
UNCOV
898
                return "userTrigger"
×
899

UNCOV
900
        case localCloseTrigger:
×
UNCOV
901
                return "localCloseTrigger"
×
902

UNCOV
903
        case coopCloseTrigger:
×
UNCOV
904
                return "coopCloseTrigger"
×
905

UNCOV
906
        case breachCloseTrigger:
×
UNCOV
907
                return "breachCloseTrigger"
×
908

909
        default:
×
910
                return "unknown trigger"
×
911
        }
912
}
913

914
// stateStep is a help method that examines our internal state, and attempts
915
// the appropriate state transition if necessary. The next state we transition
916
// to is returned, Additionally, if the next transition results in a commitment
917
// broadcast, the commitment transaction itself is returned.
918
func (c *ChannelArbitrator) stateStep(
919
        triggerHeight uint32, trigger transitionTrigger,
920
        confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
175✔
921

175✔
922
        var (
175✔
923
                nextState ArbitratorState
175✔
924
                closeTx   *wire.MsgTx
175✔
925
        )
175✔
926
        switch c.state {
175✔
927

928
        // If we're in the default state, then we'll check our set of actions
929
        // to see if while we were down, conditions have changed.
930
        case StateDefault:
64✔
931
                log.Debugf("ChannelArbitrator(%v): new block (height=%v) "+
64✔
932
                        "examining active HTLC's", c.cfg.ChanPoint,
64✔
933
                        triggerHeight)
64✔
934

64✔
935
                // As a new block has been connected to the end of the main
64✔
936
                // chain, we'll check to see if we need to make any on-chain
64✔
937
                // claims on behalf of the channel contract that we're
64✔
938
                // arbitrating for. If a commitment has confirmed, then we'll
64✔
939
                // use the set snapshot from the chain, otherwise we'll use our
64✔
940
                // current set.
64✔
941
                var (
64✔
942
                        chainActions ChainActionMap
64✔
943
                        err          error
64✔
944
                )
64✔
945

64✔
946
                // Normally if we force close the channel locally we will have
64✔
947
                // no confCommitSet. However when the remote commitment confirms
64✔
948
                // without us ever broadcasting our local commitment we need to
64✔
949
                // make sure we cancel all upstream HTLCs for outgoing dust
64✔
950
                // HTLCs as well, hence we need to fetch the chain actions here
64✔
951
                // as well.
64✔
952
                if confCommitSet == nil {
119✔
953
                        // Update the set of activeHTLCs so
55✔
954
                        // checkLocalChainActions has an up-to-date view of the
55✔
955
                        // commitments.
55✔
956
                        c.updateActiveHTLCs()
55✔
957
                        htlcs := c.activeHTLCs
55✔
958
                        chainActions, err = c.checkLocalChainActions(
55✔
959
                                triggerHeight, trigger, htlcs, false,
55✔
960
                        )
55✔
961
                        if err != nil {
55✔
962
                                return StateDefault, nil, err
×
963
                        }
×
964
                } else {
9✔
965
                        chainActions, err = c.constructChainActions(
9✔
966
                                confCommitSet, triggerHeight, trigger,
9✔
967
                        )
9✔
968
                        if err != nil {
9✔
969
                                return StateDefault, nil, err
×
970
                        }
×
971
                }
972

973
                // If there are no actions to be made, then we'll remain in the
974
                // default state. If this isn't a self initiated event (we're
975
                // checking due to a chain update), then we'll exit now.
976
                if len(chainActions) == 0 && trigger == chainTrigger {
101✔
977
                        log.Debugf("ChannelArbitrator(%v): no actions for "+
37✔
978
                                "chain trigger, terminating", c.cfg.ChanPoint)
37✔
979

37✔
980
                        return StateDefault, closeTx, nil
37✔
981
                }
37✔
982

983
                // Otherwise, we'll log that we checked the HTLC actions as the
984
                // commitment transaction has already been broadcast.
985
                log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v",
27✔
986
                        c.cfg.ChanPoint, lnutils.SpewLogClosure(chainActions))
27✔
987

27✔
988
                // Cancel upstream HTLCs for all outgoing dust HTLCs available
27✔
989
                // either on the local or the remote/remote pending commitment
27✔
990
                // transaction.
27✔
991
                dustHTLCs := chainActions[HtlcFailDustAction]
27✔
992
                if len(dustHTLCs) > 0 {
28✔
993
                        log.Debugf("ChannelArbitrator(%v): canceling %v dust "+
1✔
994
                                "HTLCs backwards", c.cfg.ChanPoint,
1✔
995
                                len(dustHTLCs))
1✔
996

1✔
997
                        getIdx := func(htlc channeldb.HTLC) uint64 {
2✔
998
                                return htlc.HtlcIndex
1✔
999
                        }
1✔
1000
                        dustHTLCSet := fn.NewSet(fn.Map(getIdx, dustHTLCs)...)
1✔
1001
                        err = c.abandonForwards(dustHTLCSet)
1✔
1002
                        if err != nil {
1✔
1003
                                return StateError, closeTx, err
×
1004
                        }
×
1005
                }
1006

1007
                // Depending on the type of trigger, we'll either "tunnel"
1008
                // through to a farther state, or just proceed linearly to the
1009
                // next state.
1010
                switch trigger {
27✔
1011

1012
                // If this is a chain trigger, then we'll go straight to the
1013
                // next state, as we still need to broadcast the commitment
1014
                // transaction.
1015
                case chainTrigger:
5✔
1016
                        fallthrough
5✔
1017
                case userTrigger:
15✔
1018
                        nextState = StateBroadcastCommit
15✔
1019

1020
                // If the trigger is a cooperative close being confirmed, then
1021
                // we can go straight to StateFullyResolved, as there won't be
1022
                // any contracts to resolve.
1023
                case coopCloseTrigger:
3✔
1024
                        nextState = StateFullyResolved
3✔
1025

1026
                // Otherwise, if this state advance was triggered by a
1027
                // commitment being confirmed on chain, then we'll jump
1028
                // straight to the state where the contract has already been
1029
                // closed, and we will inspect the set of unresolved contracts.
1030
                case localCloseTrigger:
2✔
1031
                        log.Errorf("ChannelArbitrator(%v): unexpected local "+
2✔
1032
                                "commitment confirmed while in StateDefault",
2✔
1033
                                c.cfg.ChanPoint)
2✔
1034
                        fallthrough
2✔
1035
                case remoteCloseTrigger:
8✔
1036
                        nextState = StateContractClosed
8✔
1037

1038
                case breachCloseTrigger:
1✔
1039
                        nextContractState, err := c.checkLegacyBreach()
1✔
1040
                        if nextContractState == StateError {
1✔
1041
                                return nextContractState, nil, err
×
1042
                        }
×
1043

1044
                        nextState = nextContractState
1✔
1045
                }
1046

1047
        // If we're in this state, then we've decided to broadcast the
1048
        // commitment transaction. We enter this state either due to an outside
1049
        // sub-system, or because an on-chain action has been triggered.
1050
        case StateBroadcastCommit:
20✔
1051
                // Under normal operation, we can only enter
20✔
1052
                // StateBroadcastCommit via a user or chain trigger. On restart,
20✔
1053
                // this state may be reexecuted after closing the channel, but
20✔
1054
                // failing to commit to StateContractClosed or
20✔
1055
                // StateFullyResolved. In that case, one of the four close
20✔
1056
                // triggers will be presented, signifying that we should skip
20✔
1057
                // rebroadcasting, and go straight to resolving the on-chain
20✔
1058
                // contract or marking the channel resolved.
20✔
1059
                switch trigger {
20✔
1060
                case localCloseTrigger, remoteCloseTrigger:
×
1061
                        log.Infof("ChannelArbitrator(%v): detected %s "+
×
1062
                                "close after closing channel, fast-forwarding "+
×
1063
                                "to %s to resolve contract",
×
1064
                                c.cfg.ChanPoint, trigger, StateContractClosed)
×
1065
                        return StateContractClosed, closeTx, nil
×
1066

1067
                case breachCloseTrigger:
1✔
1068
                        nextContractState, err := c.checkLegacyBreach()
1✔
1069
                        if nextContractState == StateError {
1✔
1070
                                log.Infof("ChannelArbitrator(%v): unable to "+
×
1071
                                        "advance breach close resolution: %v",
×
1072
                                        c.cfg.ChanPoint, nextContractState)
×
1073
                                return StateError, closeTx, err
×
1074
                        }
×
1075

1076
                        log.Infof("ChannelArbitrator(%v): detected %s close "+
1✔
1077
                                "after closing channel, fast-forwarding to %s"+
1✔
1078
                                " to resolve contract", c.cfg.ChanPoint,
1✔
1079
                                trigger, nextContractState)
1✔
1080

1✔
1081
                        return nextContractState, closeTx, nil
1✔
1082

1083
                case coopCloseTrigger:
×
1084
                        log.Infof("ChannelArbitrator(%v): detected %s "+
×
1085
                                "close after closing channel, fast-forwarding "+
×
1086
                                "to %s to resolve contract",
×
1087
                                c.cfg.ChanPoint, trigger, StateFullyResolved)
×
1088
                        return StateFullyResolved, closeTx, nil
×
1089
                }
1090

1091
                log.Infof("ChannelArbitrator(%v): force closing "+
19✔
1092
                        "chan", c.cfg.ChanPoint)
19✔
1093

19✔
1094
                // Now that we have all the actions decided for the set of
19✔
1095
                // HTLC's, we'll broadcast the commitment transaction, and
19✔
1096
                // signal the link to exit.
19✔
1097

19✔
1098
                // We'll tell the switch that it should remove the link for
19✔
1099
                // this channel, in addition to fetching the force close
19✔
1100
                // summary needed to close this channel on chain.
19✔
1101
                forceCloseTx, err := c.cfg.Channel.ForceCloseChan()
19✔
1102
                if err != nil {
20✔
1103
                        log.Errorf("ChannelArbitrator(%v): unable to "+
1✔
1104
                                "force close: %v", c.cfg.ChanPoint, err)
1✔
1105

1✔
1106
                        // We tried to force close (HTLC may be expiring from
1✔
1107
                        // our PoV, etc), but we think we've lost data. In this
1✔
1108
                        // case, we'll not force close, but terminate the state
1✔
1109
                        // machine here to wait to see what confirms on chain.
1✔
1110
                        if errors.Is(err, lnwallet.ErrForceCloseLocalDataLoss) {
2✔
1111
                                log.Error("ChannelArbitrator(%v): broadcast "+
1✔
1112
                                        "failed due to local data loss, "+
1✔
1113
                                        "waiting for on chain confimation...",
1✔
1114
                                        c.cfg.ChanPoint)
1✔
1115

1✔
1116
                                return StateBroadcastCommit, nil, nil
1✔
1117
                        }
1✔
1118

1119
                        return StateError, closeTx, err
×
1120
                }
1121
                closeTx = forceCloseTx
18✔
1122

18✔
1123
                // Before publishing the transaction, we store it to the
18✔
1124
                // database, such that we can re-publish later in case it
18✔
1125
                // didn't propagate. We initiated the force close, so we
18✔
1126
                // mark broadcast with local initiator set to true.
18✔
1127
                err = c.cfg.MarkCommitmentBroadcasted(closeTx, lntypes.Local)
18✔
1128
                if err != nil {
18✔
1129
                        log.Errorf("ChannelArbitrator(%v): unable to "+
×
1130
                                "mark commitment broadcasted: %v",
×
1131
                                c.cfg.ChanPoint, err)
×
1132
                        return StateError, closeTx, err
×
1133
                }
×
1134

1135
                // With the close transaction in hand, broadcast the
1136
                // transaction to the network, thereby entering the post
1137
                // channel resolution state.
1138
                log.Infof("Broadcasting force close transaction %v, "+
18✔
1139
                        "ChannelPoint(%v): %v", closeTx.TxHash(),
18✔
1140
                        c.cfg.ChanPoint, lnutils.SpewLogClosure(closeTx))
18✔
1141

18✔
1142
                // At this point, we'll now broadcast the commitment
18✔
1143
                // transaction itself.
18✔
1144
                label := labels.MakeLabel(
18✔
1145
                        labels.LabelTypeChannelClose, &c.cfg.ShortChanID,
18✔
1146
                )
18✔
1147
                if err := c.cfg.PublishTx(closeTx, label); err != nil {
23✔
1148
                        log.Errorf("ChannelArbitrator(%v): unable to broadcast "+
5✔
1149
                                "close tx: %v", c.cfg.ChanPoint, err)
5✔
1150

5✔
1151
                        // This makes sure we don't fail at startup if the
5✔
1152
                        // commitment transaction has too low fees to make it
5✔
1153
                        // into mempool. The rebroadcaster makes sure this
5✔
1154
                        // transaction is republished regularly until confirmed
5✔
1155
                        // or replaced.
5✔
1156
                        if !errors.Is(err, lnwallet.ErrDoubleSpend) &&
5✔
1157
                                !errors.Is(err, lnwallet.ErrMempoolFee) {
7✔
1158

2✔
1159
                                return StateError, closeTx, err
2✔
1160
                        }
2✔
1161
                }
1162

1163
                // We go to the StateCommitmentBroadcasted state, where we'll
1164
                // be waiting for the commitment to be confirmed.
1165
                nextState = StateCommitmentBroadcasted
16✔
1166

        // In this state we have broadcasted our own commitment, and will need
        // to wait for a commitment (not necessarily the one we broadcasted!)
        // to be confirmed.
        case StateCommitmentBroadcasted:
                switch trigger {

                // We are waiting for a commitment to be confirmed.
                case chainTrigger, userTrigger:
                        // The commitment transaction has been broadcast, but it
                        // doesn't necessarily need to be the commitment
                        // transaction version that is going to be confirmed. To
                        // be sure that any of those versions can be anchored
                        // down, we now submit all anchor resolutions to the
                        // sweeper. The sweeper will keep trying to sweep all of
                        // them.
                        //
                        // Note that the sweeper is idempotent. If we ever
                        // happen to end up at this point in the code again, no
                        // harm is done by re-offering the anchors to the
                        // sweeper.
                        anchors, err := c.cfg.Channel.NewAnchorResolutions()
                        if err != nil {
                                return StateError, closeTx, err
                        }

                        err = c.sweepAnchors(anchors, triggerHeight)
                        if err != nil {
                                return StateError, closeTx, err
                        }

                        nextState = StateCommitmentBroadcasted

                // If this state advance was triggered by any of the
                // commitments being confirmed, then we'll jump to the state
                // where the contract has been closed.
                case localCloseTrigger, remoteCloseTrigger:
                        nextState = StateContractClosed

                // If a coop close was confirmed, jump straight to the fully
                // resolved state.
                case coopCloseTrigger:
                        nextState = StateFullyResolved

                case breachCloseTrigger:
                        nextContractState, err := c.checkLegacyBreach()
                        if nextContractState == StateError {
                                return nextContractState, closeTx, err
                        }

                        nextState = nextContractState
                }

                log.Infof("ChannelArbitrator(%v): trigger %v moving from "+
                        "state %v to %v", c.cfg.ChanPoint, trigger, c.state,
                        nextState)

        // If we're in this state, then the contract has been fully closed to
        // outside sub-systems, so we'll process the prior set of on-chain
        // contract actions and launch a set of resolvers.
        case StateContractClosed:
                // First, we'll fetch our chain actions, and both sets of
                // resolutions so we can process them.
                contractResolutions, err := c.log.FetchContractResolutions()
                if err != nil {
                        log.Errorf("unable to fetch contract resolutions: %v",
                                err)
                        return StateError, closeTx, err
                }

                // If the resolution is empty, and we have no HTLCs at all to
                // send to, then we're done here. We don't need to launch any
                // resolvers, and can go straight to our final state.
                if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() {
                        log.Infof("ChannelArbitrator(%v): contract "+
                                "resolutions empty, marking channel as fully resolved!",
                                c.cfg.ChanPoint)
                        nextState = StateFullyResolved
                        break
                }

                // First, we'll reconstruct a fresh set of chain actions as the
                // set of actions we need to act on may differ based on if it
                // was our commitment, or their commitment that hit the chain.
                htlcActions, err := c.constructChainActions(
                        confCommitSet, triggerHeight, trigger,
                )
                if err != nil {
                        return StateError, closeTx, err
                }

                // In case it's a breach transaction we fail back all upstream
                // HTLCs for their corresponding outgoing HTLCs on the remote
                // commitment set (remote and remote pending set).
                if contractResolutions.BreachResolution != nil {
                        // cancelBreachedHTLCs is a set which holds HTLCs whose
                        // corresponding incoming HTLCs will be failed back
                        // because the peer broadcasted an old state.
                        cancelBreachedHTLCs := fn.NewSet[uint64]()

                        // We'll use the CommitSet to fail back all upstream
                        // HTLCs for their corresponding outgoing HTLCs that
                        // exist on either of the remote commitments. The map
                        // is used to deduplicate any shared HTLC's.
                        for htlcSetKey, htlcs := range confCommitSet.HtlcSets {
                                if !htlcSetKey.IsRemote {
                                        continue
                                }

                                for _, htlc := range htlcs {
                                        // Only outgoing HTLCs have a
                                        // corresponding incoming HTLC.
                                        if htlc.Incoming {
                                                continue
                                        }

                                        cancelBreachedHTLCs.Add(htlc.HtlcIndex)
                                }
                        }

                        err := c.abandonForwards(cancelBreachedHTLCs)
                        if err != nil {
                                return StateError, closeTx, err
                        }
                } else {
                        // If it's not a breach, we resolve all incoming dust
                        // HTLCs immediately after the commitment is confirmed.
                        err = c.failIncomingDust(
                                htlcActions[HtlcIncomingDustFinalAction],
                        )
                        if err != nil {
                                return StateError, closeTx, err
                        }

                        // We fail the upstream HTLCs for all remote pending
                        // outgoing HTLCs as soon as the commitment is
                        // confirmed. The upstream HTLCs for outgoing dust
                        // HTLCs have already been resolved before we reach
                        // this point.
                        getIdx := func(htlc channeldb.HTLC) uint64 {
                                return htlc.HtlcIndex
                        }
                        remoteDangling := fn.NewSet(fn.Map(
                                getIdx, htlcActions[HtlcFailDanglingAction],
                        )...)
                        err := c.abandonForwards(remoteDangling)
                        if err != nil {
                                return StateError, closeTx, err
                        }
                }

                // Now that we know we'll need to act, we'll process all the
                // resolvers, then create the structures we need to resolve all
                // outstanding contracts.
                resolvers, err := c.prepContractResolutions(
                        contractResolutions, triggerHeight, htlcActions,
                )
                if err != nil {
                        log.Errorf("ChannelArbitrator(%v): unable to "+
                                "resolve contracts: %v", c.cfg.ChanPoint, err)
                        return StateError, closeTx, err
                }

                log.Debugf("ChannelArbitrator(%v): inserting %v contract "+
                        "resolvers", c.cfg.ChanPoint, len(resolvers))

                err = c.log.InsertUnresolvedContracts(nil, resolvers...)
                if err != nil {
                        return StateError, closeTx, err
                }

                // Finally, we'll launch all the required contract resolvers.
                // Once they're all resolved, we're no longer needed.
                c.launchResolvers(resolvers, false)

                nextState = StateWaitingFullResolution

        // This is our terminal state. We'll keep returning this state until
        // all contracts are fully resolved.
        case StateWaitingFullResolution:
                log.Infof("ChannelArbitrator(%v): still awaiting contract "+
                        "resolution", c.cfg.ChanPoint)

                unresolved, err := c.log.FetchUnresolvedContracts()
                if err != nil {
                        return StateError, closeTx, err
                }

                // If we have no unresolved contracts, then we can move to the
                // final state.
                if len(unresolved) == 0 {
                        nextState = StateFullyResolved
                        break
                }

                // Otherwise we still have unresolved contracts, so we'll stay
                // alive to oversee their resolution.
                nextState = StateWaitingFullResolution

                // Add debug logs.
                for _, r := range unresolved {
                        log.Debugf("ChannelArbitrator(%v): still have "+
                                "unresolved contract: %T", c.cfg.ChanPoint, r)
                }

        // If we start as fully resolved, then we'll end as fully resolved.
        case StateFullyResolved:
                // To ensure that the state of the contract in persistent
                // storage is properly reflected, we'll mark the contract as
                // fully resolved now.
                nextState = StateFullyResolved

                log.Infof("ChannelPoint(%v) has been fully resolved "+
                        "on-chain at height=%v", c.cfg.ChanPoint, triggerHeight)

                if err := c.cfg.MarkChannelResolved(); err != nil {
                        log.Errorf("unable to mark channel resolved: %v", err)
                        return StateError, closeTx, err
                }
        }

        log.Tracef("ChannelArbitrator(%v): next_state=%v", c.cfg.ChanPoint,
                nextState)

        return nextState, closeTx, nil
}

// sweepAnchors offers all given anchor resolutions to the sweeper. It requests
// sweeping at the minimum fee rate. This fee rate can be upped manually by the
// user via the BumpFee rpc.
func (c *ChannelArbitrator) sweepAnchors(anchors *lnwallet.AnchorResolutions,
        heightHint uint32) error {

        // Update the set of activeHTLCs so that the sweeping routine has an
        // up-to-date view of the set of commitments.
        c.updateActiveHTLCs()

        // Prepare the sweeping requests for all possible versions of
        // commitments.
        sweepReqs, err := c.prepareAnchorSweeps(heightHint, anchors)
        if err != nil {
                return err
        }

        // Send out the sweeping requests to the sweeper.
        for _, req := range sweepReqs {
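                // Only the error from offering the input to the sweeper is
                // checked here; the first return value (the sweep result) is
                // discarded, so we don't block waiting for the anchor sweeps
                // themselves to complete.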
                _, err = c.cfg.Sweeper.SweepInput(req.input, req.params)
                if err != nil {
                        return err
                }
        }

        return nil
}

// findCommitmentDeadlineAndValue finds the deadline (relative block height)
// for a commitment transaction by extracting the minimum CLTV from its HTLCs.
// From our PoV, the deadline delta is defined to be the smaller of,
//   - half of the least CLTV from outgoing HTLCs' corresponding incoming
//     HTLCs, or,
//   - half of the least CLTV from incoming HTLCs if the preimage is available.
//
// We use half of the CLTV value to ensure that we have enough time to sweep
// the second-level HTLCs.
//
// It also finds the total value that is time-sensitive, which is the sum of
// all the outgoing HTLCs plus incoming HTLCs whose preimages are known. It
// then returns the value left after subtracting the budget used for sweeping
// the time-sensitive HTLCs.
//
// NOTE: when the deadline turns out to be 0 blocks, we will replace it with 1
// block because our fee estimator doesn't allow a 0 conf target. This also
// means we're left behind and should increase our fee to make the transaction
// confirmed asap.
func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
        htlcs htlcSet) (fn.Option[int32], btcutil.Amount, error) {

        deadlineMinHeight := uint32(math.MaxUint32)
        totalValue := btcutil.Amount(0)

        // First, iterate through the outgoingHTLCs to find the lowest CLTV
        // value.
        for _, htlc := range htlcs.outgoingHTLCs {
                // Skip if the HTLC is dust.
                if htlc.OutputIndex < 0 {
                        log.Debugf("ChannelArbitrator(%v): skipped deadline "+
                                "for dust htlc=%x",
                                c.cfg.ChanPoint, htlc.RHash[:])

                        continue
                }

                value := htlc.Amt.ToSatoshis()

                // Find the expiry height for this outgoing HTLC's incoming
                // HTLC.
                deadlineOpt := c.cfg.FindOutgoingHTLCDeadline(htlc)

                // The deadline defaults to the current deadlineMinHeight, and
                // it's overwritten when the option is not none.
                deadline := deadlineMinHeight
                deadlineOpt.WhenSome(func(d int32) {
                        deadline = uint32(d)

                        // We only consider the value to be under protection
                        // when it's time-sensitive.
                        totalValue += value
                })

                if deadline < deadlineMinHeight {
                        deadlineMinHeight = deadline

                        log.Tracef("ChannelArbitrator(%v): outgoing HTLC has "+
                                "deadline=%v, value=%v", c.cfg.ChanPoint,
                                deadlineMinHeight, value)
                }
        }

        // Then go through the incomingHTLCs and update the minHeight when the
        // conditions are met.
        for _, htlc := range htlcs.incomingHTLCs {
                // Skip if the HTLC is dust.
                if htlc.OutputIndex < 0 {
                        log.Debugf("ChannelArbitrator(%v): skipped deadline "+
                                "for dust htlc=%x",
                                c.cfg.ChanPoint, htlc.RHash[:])

                        continue
                }

                // Since it's an HTLC sent to us, check if we have the preimage
                // for this HTLC.
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
                if err != nil {
                        return fn.None[int32](), 0, err
                }

                if !preimageAvailable {
                        continue
                }

                value := htlc.Amt.ToSatoshis()
                totalValue += value

                if htlc.RefundTimeout < deadlineMinHeight {
                        deadlineMinHeight = htlc.RefundTimeout

                        log.Tracef("ChannelArbitrator(%v): incoming HTLC has "+
                                "deadline=%v, amt=%v", c.cfg.ChanPoint,
                                deadlineMinHeight, value)
                }
        }

        // Calculate the deadline. There are two cases to be handled here,
        //   - when the deadlineMinHeight never gets updated, which could
        //     happen when we have no outgoing HTLCs, and, for incoming HTLCs,
        //       * either we have none, or,
        //       * none of the HTLCs are preimageAvailable.
        //   - when our deadlineMinHeight is no greater than the heightHint,
        //     which means we are behind our schedule.
        var deadline uint32
        switch {
        // When we couldn't find a deadline height from our HTLCs, we will fall
        // back to the default value as there's no time pressure here.
        case deadlineMinHeight == math.MaxUint32:
                return fn.None[int32](), 0, nil

        // When the deadline is passed, we will fall back to the smallest conf
        // target (1 block).
        case deadlineMinHeight <= heightHint:
                log.Warnf("ChannelArbitrator(%v): deadline is passed with "+
                        "deadlineMinHeight=%d, heightHint=%d",
                        c.cfg.ChanPoint, deadlineMinHeight, heightHint)
                deadline = 1

        // Use half of the deadline delta, and leave the other half to be used
        // to sweep the HTLCs.
        default:
                deadline = (deadlineMinHeight - heightHint) / 2
        }

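        // For example, with heightHint=800_000 and the earliest relevant
        // expiry at height 800_144, the deadline above works out to
        // (800_144 - 800_000) / 2 = 72 blocks, leaving the other half of the
        // delta for sweeping the resulting second-level HTLC outputs.
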
        // Calculate the value left after subtracting the budget used for
        // sweeping the time-sensitive HTLCs.
        valueLeft := totalValue - calculateBudget(
                totalValue, c.cfg.Budget.DeadlineHTLCRatio,
                c.cfg.Budget.DeadlineHTLC,
        )

        log.Debugf("ChannelArbitrator(%v): calculated valueLeft=%v, "+
                "deadline=%d, using deadlineMinHeight=%d, heightHint=%d",
                c.cfg.ChanPoint, valueLeft, deadline, deadlineMinHeight,
                heightHint)

        return fn.Some(int32(deadline)), valueLeft, nil
}

// launchResolvers updates the activeResolvers list and starts the resolvers.
func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver,
        immediate bool) {

        c.activeResolversLock.Lock()
        defer c.activeResolversLock.Unlock()

        c.activeResolvers = resolvers
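        // Each resolver runs in its own goroutine, tracked via the
        // arbitrator's wait group so that shutdown can wait for them to exit.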
        for _, contract := range resolvers {
                c.wg.Add(1)
                go c.resolveContract(contract, immediate)
        }
}

// advanceState is the main driver of our state machine. This method is an
// iterative function which repeatedly attempts to advance the internal state
// of the channel arbitrator. The state will be advanced until we reach a
// redundant transition, meaning that the state transition is a noop.
func (c *ChannelArbitrator) advanceState(
        triggerHeight uint32, trigger transitionTrigger,
        confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {

        var (
                priorState   ArbitratorState
                forceCloseTx *wire.MsgTx
        )

        // We'll continue to advance our state forward until the state we
        // transition to is that same state that we started at.
        for {
                priorState = c.state
                log.Debugf("ChannelArbitrator(%v): attempting state step with "+
                        "trigger=%v from state=%v", c.cfg.ChanPoint, trigger,
                        priorState)

                nextState, closeTx, err := c.stateStep(
                        triggerHeight, trigger, confCommitSet,
                )
                if err != nil {
                        log.Errorf("ChannelArbitrator(%v): unable to advance "+
                                "state: %v", c.cfg.ChanPoint, err)
                        return priorState, nil, err
                }

                if forceCloseTx == nil && closeTx != nil {
                        forceCloseTx = closeTx
                }

                // Our termination transition is a noop transition. If we get
                // our prior state back as the next state, then we'll
                // terminate.
                if nextState == priorState {
                        log.Debugf("ChannelArbitrator(%v): terminating at "+
                                "state=%v", c.cfg.ChanPoint, nextState)
                        return nextState, forceCloseTx, nil
                }

                // As the prior state was successfully executed, we can now
                // commit the next state. This ensures that we will re-execute
                // the prior state if anything fails.
                if err := c.log.CommitState(nextState); err != nil {
                        log.Errorf("ChannelArbitrator(%v): unable to commit "+
                                "next state(%v): %v", c.cfg.ChanPoint,
                                nextState, err)
                        return priorState, nil, err
                }
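                // Only after the new state has been durably committed to the
                // log do we update the in-memory state, so a crash in between
                // simply re-executes the prior state on restart.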
                c.state = nextState
        }
}

// ChainAction is an enum that encompasses all possible on-chain actions
// we'll take for a set of HTLC's.
type ChainAction uint8

const (
        // NoAction is the min chainAction type, indicating that no action
        // needs to be taken for a given HTLC.
        NoAction ChainAction = 0

        // HtlcTimeoutAction indicates that the HTLC will timeout soon. As a
        // result, we should get ready to sweep it on chain after the timeout.
        HtlcTimeoutAction = 1

        // HtlcClaimAction indicates that we should claim the HTLC on chain
        // before its timeout period.
        HtlcClaimAction = 2

        // HtlcFailDustAction indicates that we should fail the upstream HTLC
        // for an outgoing dust HTLC immediately (even before the commitment
        // transaction is confirmed) because it has no output on the commitment
        // transaction. This also includes remote pending outgoing dust HTLCs.
        HtlcFailDustAction = 3

        // HtlcOutgoingWatchAction indicates that we can't yet timeout this
        // HTLC, but we had to go to chain in order to resolve an existing
        // HTLC. In this case, we'll either: time it out once it expires, or
        // will learn the pre-image if the remote party claims the output. In
        // this case, we'll add the pre-image to our global store.
        HtlcOutgoingWatchAction = 4

        // HtlcIncomingWatchAction indicates that we don't yet have the
        // pre-image to claim an incoming HTLC, but we had to go to chain in
        // order to resolve an existing HTLC. In this case, we'll either: let
        // the other party time it out, or eventually learn of the pre-image,
        // in which case we'll claim on chain.
        HtlcIncomingWatchAction = 5

        // HtlcIncomingDustFinalAction indicates that we should mark an incoming
        // dust htlc as final because it can't be claimed on-chain.
        HtlcIncomingDustFinalAction = 6

        // HtlcFailDanglingAction indicates that we should fail the upstream
        // HTLC for an outgoing HTLC immediately after the commitment
        // transaction has confirmed because it has no corresponding output on
        // the commitment transaction. This category does NOT include any dust
        // HTLCs which are mapped in the "HtlcFailDustAction" category.
        HtlcFailDanglingAction = 7
)

// String returns a human readable string describing a chain action.
func (c ChainAction) String() string {
        switch c {
        case NoAction:
                return "NoAction"

        case HtlcTimeoutAction:
                return "HtlcTimeoutAction"

        case HtlcClaimAction:
                return "HtlcClaimAction"

        case HtlcFailDustAction:
                return "HtlcFailDustAction"

        case HtlcOutgoingWatchAction:
                return "HtlcOutgoingWatchAction"

        case HtlcIncomingWatchAction:
                return "HtlcIncomingWatchAction"

        case HtlcIncomingDustFinalAction:
                return "HtlcIncomingDustFinalAction"

        case HtlcFailDanglingAction:
                return "HtlcFailDanglingAction"

        default:
                return "<unknown action>"
        }
}

// ChainActionMap is a map of a chain action, to the set of HTLC's that need to
// be acted upon for a given action type.
type ChainActionMap map[ChainAction][]channeldb.HTLC

// Merge merges the passed chain actions with the target chain action map.
func (c ChainActionMap) Merge(actions ChainActionMap) {
        for chainAction, htlcs := range actions {
                c[chainAction] = append(c[chainAction], htlcs...)
        }
}

// shouldGoOnChain takes into account the absolute timeout of the HTLC, if the
// confirmation delta that we need is close, and returns a bool indicating if
// we should go on chain to claim. We do this rather than waiting up until the
// last minute as we want to ensure that when we *need* (HTLC is timed out) to
// sweep, the commitment is already confirmed.
func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC,
        broadcastDelta, currentHeight uint32) bool {

        // We'll calculate the broadcast cut off for this HTLC. This is the
        // height that (based on our current fee estimation) we should
        // broadcast in order to ensure the commitment transaction is confirmed
        // before the HTLC fully expires.
        broadcastCutOff := htlc.RefundTimeout - broadcastDelta

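        // For example, with RefundTimeout=800_100 and a broadcastDelta of 10
        // blocks, the cutoff is height 800_090: from that height onwards we
        // consider going on-chain for this HTLC.
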
        log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+
28✔
1741
                "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout,
28✔
1742
                broadcastCutOff, currentHeight)
28✔
1743

28✔
1744
        // TODO(roasbeef): take into account default HTLC delta, don't need to
28✔
1745
        // broadcast immediately
28✔
1746
        //  * can then batch with SINGLE | ANYONECANPAY
28✔
1747

28✔
1748
        // We should on-chain for this HTLC, iff we're within out broadcast
28✔
1749
        // cutoff window.
28✔
1750
        if currentHeight < broadcastCutOff {
49✔
1751
                return false
21✔
1752
        }
21✔
1753

1754
        // In case of incoming htlc we should go to chain.
1755
        if htlc.Incoming {
7✔
UNCOV
1756
                return true
×
UNCOV
1757
        }
×
1758

1759
        // For htlcs that are result of our initiated payments we give some grace
1760
        // period before force closing the channel. During this time we expect
1761
        // both nodes to connect and give a chance to the other node to send its
1762
        // updates and cancel the htlc.
1763
        // This shouldn't add any security risk as there is no incoming htlc to
1764
        // fulfill at this case and the expectation is that when the channel is
1765
        // active the other node will send update_fail_htlc to remove the htlc
1766
        // without closing the channel. It is up to the user to force close the
1767
        // channel if the peer misbehaves and doesn't send the update_fail_htlc.
1768
        // It is useful when this node is most of the time not online and is
1769
        // likely to miss the time slot where the htlc may be cancelled.
1770
        isForwarded := c.cfg.IsForwardedHTLC(c.cfg.ShortChanID, htlc.HtlcIndex)
7✔
1771
        upTime := c.cfg.Clock.Now().Sub(c.startTimestamp)
7✔
1772
        return isForwarded || upTime > c.cfg.PaymentsExpirationGracePeriod
7✔
1773
}
1774

1775
// checkCommitChainActions is called for each new block connected to the end of
// the main chain. Given the new block height, this method will examine all
// active HTLC's, and determine if we need to go on-chain to claim any of them.
// A map of action -> []htlc is returned, detailing what action (if any) should
// be performed for each HTLC. For timed out HTLC's, once the commitment has
// been sufficiently confirmed, the HTLC's should be canceled backwards. For
// redeemed HTLC's, we should send the pre-image back to the incoming link.
func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
        trigger transitionTrigger, htlcs htlcSet) (ChainActionMap, error) {

        // TODO(roasbeef): would need to lock channel? channel totem?
        //  * race condition if adding and we broadcast, etc
        //  * or would make each instance sync?

        log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+
                "height=%v, in_htlc_count=%v, out_htlc_count=%v",
                c.cfg.ChanPoint, height,
                len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs))

        actionMap := make(ChainActionMap)

        // First, we'll make an initial pass over the set of incoming and
        // outgoing HTLC's to decide if we need to go on chain at all.
        haveChainActions := false
        for _, htlc := range htlcs.outgoingHTLCs {
                // We'll need to go on-chain for an outgoing HTLC if it was
                // never resolved downstream, and it's "close" to timing out.
                //
                // TODO(yy): If there's no corresponding incoming HTLC, it
                // means we are the first hop, hence the payer. This is a
                // tricky case - unlike a forwarding hop, we don't have an
                // incoming HTLC that will time out, which means as long as we
                // can learn the preimage, we can settle the invoice (before it
                // expires?).
                toChain := c.shouldGoOnChain(
                        htlc, c.cfg.OutgoingBroadcastDelta, height,
                )

                if toChain {
                        // Convert to int64 in case of overflow.
                        remainingBlocks := int64(htlc.RefundTimeout) -
                                int64(height)

                        log.Infof("ChannelArbitrator(%v): go to chain for "+
                                "outgoing htlc %x: timeout=%v, amount=%v, "+
                                "blocks_until_expiry=%v, broadcast_delta=%v",
                                c.cfg.ChanPoint, htlc.RHash[:],
                                htlc.RefundTimeout, htlc.Amt, remainingBlocks,
                                c.cfg.OutgoingBroadcastDelta,
                        )
                }

                haveChainActions = haveChainActions || toChain
        }

        for _, htlc := range htlcs.incomingHTLCs {
                // We'll need to go on-chain to pull an incoming HTLC iff we
                // know the pre-image and it's close to timing out. We need to
                // ensure that we claim the funds that are rightfully ours
                // on-chain.
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
                if err != nil {
                        return nil, err
                }

                if !preimageAvailable {
                        continue
                }

                toChain := c.shouldGoOnChain(
                        htlc, c.cfg.IncomingBroadcastDelta, height,
                )

                if toChain {
                        // Convert to int64 in case of overflow.
                        remainingBlocks := int64(htlc.RefundTimeout) -
                                int64(height)

                        log.Infof("ChannelArbitrator(%v): go to chain for "+
                                "incoming htlc %x: timeout=%v, amount=%v, "+
                                "blocks_until_expiry=%v, broadcast_delta=%v",
                                c.cfg.ChanPoint, htlc.RHash[:],
                                htlc.RefundTimeout, htlc.Amt, remainingBlocks,
                                c.cfg.IncomingBroadcastDelta,
                        )
                }

                haveChainActions = haveChainActions || toChain
        }

        // If we don't have any actions to make, then we'll return an empty
        // action map. We only do this if this was a chain trigger though, as
        // if we're going to broadcast the commitment (or the remote party did)
        // we're *forced* to act on each HTLC.
        if !haveChainActions && trigger == chainTrigger {
                log.Tracef("ChannelArbitrator(%v): no actions to take at "+
                        "height=%v", c.cfg.ChanPoint, height)
                return actionMap, nil
        }

        // Now that we know we'll need to go on-chain, we'll examine all of our
        // active outgoing HTLC's to see if we either need to: sweep them after
        // a timeout (then cancel backwards), cancel them backwards
        // immediately, or watch them as they're still active contracts.
        for _, htlc := range htlcs.outgoingHTLCs {
                switch {
                // If the HTLC is dust, then we can cancel it backwards
                // immediately as there's no matching contract to arbitrate
                // on-chain. We know the HTLC is dust if the OutputIndex is
                // negative.
                case htlc.OutputIndex < 0:
                        log.Tracef("ChannelArbitrator(%v): immediately "+
                                "failing dust htlc=%x", c.cfg.ChanPoint,
                                htlc.RHash[:])

                        actionMap[HtlcFailDustAction] = append(
                                actionMap[HtlcFailDustAction], htlc,
                        )

                // If we don't need to immediately act on this HTLC, then we'll
                // mark it still "live". After we broadcast, we'll monitor it
                // until the HTLC times out to see if we can also redeem it
                // on-chain.
                case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
                        height,
                ):
                        // TODO(roasbeef): also need to be able to query
                        // circuit map to see if HTLC hasn't been fully
                        // resolved
                        //
                        //  * can't fail incoming until if outgoing not yet
                        //  failed

                        log.Tracef("ChannelArbitrator(%v): watching chain to "+
                                "decide action for outgoing htlc=%x",
                                c.cfg.ChanPoint, htlc.RHash[:])

                        actionMap[HtlcOutgoingWatchAction] = append(
                                actionMap[HtlcOutgoingWatchAction], htlc,
                        )

                // Otherwise, we'll update our actionMap to mark that we need
                // to sweep this HTLC on-chain
                default:
                        log.Tracef("ChannelArbitrator(%v): going on-chain to "+
                                "timeout htlc=%x", c.cfg.ChanPoint, htlc.RHash[:])

                        actionMap[HtlcTimeoutAction] = append(
                                actionMap[HtlcTimeoutAction], htlc,
                        )
                }
        }

        // Similarly, for each incoming HTLC, now that we need to go on-chain,
        // we'll either: sweep it immediately if we know the pre-image, or
        // observe the output on-chain if we don't. In this last case, we'll
        // either learn of it eventually from the outgoing HTLC, or the sender
        // will timeout the HTLC.
        for _, htlc := range htlcs.incomingHTLCs {
                // If the HTLC is dust, there is no action to be taken.
                if htlc.OutputIndex < 0 {
                        log.Debugf("ChannelArbitrator(%v): no resolution "+
                                "needed for incoming dust htlc=%x",
                                c.cfg.ChanPoint, htlc.RHash[:])

                        actionMap[HtlcIncomingDustFinalAction] = append(
                                actionMap[HtlcIncomingDustFinalAction], htlc,
                        )

                        continue
                }

                log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
                        "action for incoming htlc=%x", c.cfg.ChanPoint,
                        htlc.RHash[:])

                actionMap[HtlcIncomingWatchAction] = append(
                        actionMap[HtlcIncomingWatchAction], htlc,
                )
        }

        return actionMap, nil
}

// isPreimageAvailable returns whether the hash preimage is available in either
// the preimage cache or the invoice database.
func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool,
        error) {

        // Start by checking the preimage cache for preimages of
        // forwarded HTLCs.
        _, preimageAvailable := c.cfg.PreimageDB.LookupPreimage(
                hash,
        )
        if preimageAvailable {
                return true, nil
        }

        // Then check if we have an invoice that can be settled by this HTLC.
        //
        // TODO(joostjager): Check that there are still more blocks remaining
        // than the invoice cltv delta. We don't want to go to chain only to
        // have the incoming contest resolver decide that we don't want to
        // settle this invoice.
        invoice, err := c.cfg.Registry.LookupInvoice(context.Background(), hash)
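        // A missing invoice is not an error here: it simply means this HTLC
        // doesn't pay one of our own invoices, so we report that no preimage
        // is available rather than failing the lookup.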
        switch {
        case err == nil:
        case errors.Is(err, invoices.ErrInvoiceNotFound) ||
                errors.Is(err, invoices.ErrNoInvoicesCreated):

                return false, nil
        default:
                return false, err
        }

        preimageAvailable = invoice.Terms.PaymentPreimage != nil

        return preimageAvailable, nil
}

// checkLocalChainActions is similar to checkCommitChainActions, but it also
// examines the set of HTLCs on the remote party's commitment. This allows us
// to ensure we're able to satisfy the HTLC timeout constraints for incoming vs
// outgoing HTLCs.
func (c *ChannelArbitrator) checkLocalChainActions(
        height uint32, trigger transitionTrigger,
        activeHTLCs map[HtlcSetKey]htlcSet,
        commitsConfirmed bool) (ChainActionMap, error) {

        // First, we'll check our local chain actions as normal. This will only
        // examine HTLCs on our local commitment (timeout or settle).
        localCommitActions, err := c.checkCommitChainActions(
                height, trigger, activeHTLCs[LocalHtlcSet],
        )
        if err != nil {
                return nil, err
        }

        // Next, we'll examine the remote commitment (and maybe a dangling one)
        // to see if the set difference of our HTLCs is non-empty. If so, then
        // we may need to cancel back some HTLCs if we decide to go to chain.
        remoteDanglingActions := c.checkRemoteDanglingActions(
                height, activeHTLCs, commitsConfirmed,
        )

        // Finally, we'll merge the two sets of chain actions.
        localCommitActions.Merge(remoteDanglingActions)

        return localCommitActions, nil
}

// checkRemoteDanglingActions examines the set of remote commitments for any
// HTLCs that are close to timing out. If we find any, then we'll return a set
// of chain actions for HTLCs that are on their commitment, but not ours, to
// cancel immediately.
func (c *ChannelArbitrator) checkRemoteDanglingActions(
        height uint32, activeHTLCs map[HtlcSetKey]htlcSet,
        commitsConfirmed bool) ChainActionMap {

        var (
                pendingRemoteHTLCs []channeldb.HTLC
                localHTLCs         = make(map[uint64]struct{})
                remoteHTLCs        = make(map[uint64]channeldb.HTLC)
                actionMap          = make(ChainActionMap)
        )

        // First, we'll construct two sets of the outgoing HTLCs: those on our
        // local commitment, and those that are on the remote commitment(s).
        for htlcSetKey, htlcs := range activeHTLCs {
                if htlcSetKey.IsRemote {
                        for _, htlc := range htlcs.outgoingHTLCs {
                                remoteHTLCs[htlc.HtlcIndex] = htlc
                        }
                } else {
                        for _, htlc := range htlcs.outgoingHTLCs {
                                localHTLCs[htlc.HtlcIndex] = struct{}{}
                        }
                }
        }

        // With both sets constructed, we'll now compute the set difference of
        // our two sets of HTLCs. This'll give us the HTLCs that exist on the
        // remote commitment transaction, but not on ours.
        for htlcIndex, htlc := range remoteHTLCs {
                if _, ok := localHTLCs[htlcIndex]; ok {
                        continue
                }

                pendingRemoteHTLCs = append(pendingRemoteHTLCs, htlc)
        }

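        // At this point pendingRemoteHTLCs holds outgoing HTLCs that only the
        // remote commitment(s) know about; they have no output on our local
        // commitment, which is why they are candidates to be failed back
        // upstream below.
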
        // Finally, we'll examine all the pending remote HTLCs for those that
        // have expired. If we find any, then we'll recommend that they be
        // failed now so we can free up the incoming HTLC.
        for _, htlc := range pendingRemoteHTLCs {
                // We'll now check if we need to go to chain in order to cancel
                // the incoming HTLC.
                goToChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
                        height,
                )

                // If we don't need to go to chain, and no commitments have
                // been confirmed, then we can move on. Otherwise, if
                // commitments have been confirmed, then we need to cancel back
                // *all* of the pending remote HTLCs.
                if !goToChain && !commitsConfirmed {
                        continue
                }

                // Dust htlcs can be canceled back even before the commitment
                // transaction confirms. Dust htlcs are not enforceable onchain.
                // If another version of the commit tx would confirm we either
                // gain or lose those dust amounts but there is no other way
                // than cancelling the incoming HTLC back because we will never
                // learn the preimage.
                if htlc.OutputIndex < 0 {
                        log.Infof("ChannelArbitrator(%v): fail dangling dust "+
                                "htlc=%x from local/remote commitments diff",
                                c.cfg.ChanPoint, htlc.RHash[:])

                        actionMap[HtlcFailDustAction] = append(
                                actionMap[HtlcFailDustAction], htlc,
                        )

                        continue
                }

                log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
                        "local/remote commitments diff",
                        c.cfg.ChanPoint, htlc.RHash[:])

                actionMap[HtlcFailDanglingAction] = append(
                        actionMap[HtlcFailDanglingAction], htlc,
                )
        }

        return actionMap
}

// checkRemoteChainActions examines the two possible remote commitment chains
// and returns the set of chain actions we need to carry out if the remote
// commitment (non pending) confirms. The pendingConf indicates if the pending
// remote commitment confirmed. This is similar to checkCommitChainActions, but
// we'll immediately fail any HTLCs on the pending remote commit, but not the
// remote commit (or the other way around).
func (c *ChannelArbitrator) checkRemoteChainActions(
        height uint32, trigger transitionTrigger,
        activeHTLCs map[HtlcSetKey]htlcSet,
        pendingConf bool) (ChainActionMap, error) {

        // First, we'll examine all the normal chain actions on the remote
        // commitment that confirmed.
        confHTLCs := activeHTLCs[RemoteHtlcSet]
        if pendingConf {
                confHTLCs = activeHTLCs[RemotePendingHtlcSet]
        }
        remoteCommitActions, err := c.checkCommitChainActions(
                height, trigger, confHTLCs,
        )
        if err != nil {
                return nil, err
        }

        // With these actions computed, we'll now check the diff of the HTLCs on
        // the commitments, and cancel back any that are on the pending but not
        // the non-pending.
        remoteDiffActions := c.checkRemoteDiffActions(
                activeHTLCs, pendingConf,
        )

        // Finally, we'll merge all the chain actions into the final set of
        // chain actions.
        remoteCommitActions.Merge(remoteDiffActions)
        return remoteCommitActions, nil
}

// checkRemoteDiffActions checks the set difference of the HTLCs on the remote
// confirmed commit and remote pending commit for HTLCs that we need to cancel
// back. If we find any HTLCs on the remote pending but not the remote, then
// we'll mark them to be failed immediately.
func (c *ChannelArbitrator) checkRemoteDiffActions(
        activeHTLCs map[HtlcSetKey]htlcSet,
        pendingConf bool) ChainActionMap {

        // First, we'll partition the HTLCs into those that are present on the
        // confirmed commitment, and those on the dangling commitment.
        confHTLCs := activeHTLCs[RemoteHtlcSet]
        danglingHTLCs := activeHTLCs[RemotePendingHtlcSet]
        if pendingConf {
                confHTLCs = activeHTLCs[RemotePendingHtlcSet]
                danglingHTLCs = activeHTLCs[RemoteHtlcSet]
        }

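        // In other words, confHTLCs always refers to the remote commitment
        // that actually confirmed, while danglingHTLCs refers to the remote
        // commitment that can no longer confirm.
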
        // Next, we'll create a set of all the HTLCs on the confirmed
        // commitment.
        remoteHtlcs := make(map[uint64]struct{})
        for _, htlc := range confHTLCs.outgoingHTLCs {
                remoteHtlcs[htlc.HtlcIndex] = struct{}{}
        }

        // With the remote HTLCs assembled, we'll mark any HTLCs only on the
        // remote pending commitment to be failed asap.
        actionMap := make(ChainActionMap)
        for _, htlc := range danglingHTLCs.outgoingHTLCs {
                if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok {
                        continue
                }

                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
                if err != nil {
                        log.Errorf("ChannelArbitrator(%v): failed to query "+
                                "preimage for dangling htlc=%x from remote "+
                                "commitments diff", c.cfg.ChanPoint,
                                htlc.RHash[:])

                        continue
                }

                if preimageAvailable {
                        continue
                }

                // Dust HTLCs on the remote commitment can be failed back.
                if htlc.OutputIndex < 0 {
                        log.Infof("ChannelArbitrator(%v): fail dangling dust "+
                                "htlc=%x from remote commitments diff",
                                c.cfg.ChanPoint, htlc.RHash[:])

                        actionMap[HtlcFailDustAction] = append(
                                actionMap[HtlcFailDustAction], htlc,
                        )

                        continue
                }

                actionMap[HtlcFailDanglingAction] = append(
                        actionMap[HtlcFailDanglingAction], htlc,
                )

                log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
                        "remote commitments diff",
                        c.cfg.ChanPoint, htlc.RHash[:])
        }

        return actionMap
}

// constructChainActions returns the set of actions that should be taken for
// confirmed HTLCs at the specified height. Our actions will depend on the set
// of HTLCs that were active across all channels at the time of channel
// closure.
func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
        height uint32, trigger transitionTrigger) (ChainActionMap, error) {

        // If we've reached this point and have no confirmed commitment set,
        // then this is an older node that had a pending close channel before
        // the CommitSet was introduced. In this case, we'll just return the
        // existing ChainActionMap they had on disk.
        if confCommitSet == nil || confCommitSet.ConfCommitKey.IsNone() {
                return c.log.FetchChainActions()
        }

        // Otherwise, we have the full commitment set written to disk, and can
        // proceed as normal.
        htlcSets := confCommitSet.toActiveHTLCSets()
        confCommitKey, err := confCommitSet.ConfCommitKey.UnwrapOrErr(
                fmt.Errorf("no commitKey available"),
        )
        if err != nil {
                return nil, err
        }

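        // The commit key tells us which of the three possible commitments
        // (our local commitment, the remote commitment, or the unrevoked
        // remote pending commitment) actually confirmed on chain, and that in
        // turn decides which chain-action check we run below.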
        switch confCommitKey {
14✔
2247
        // If the local commitment transaction confirmed, then we'll examine
2248
        // that as well as their commitments to the set of chain actions.
2249
        case LocalHtlcSet:
6✔
2250
                return c.checkLocalChainActions(
6✔
2251
                        height, trigger, htlcSets, true,
6✔
2252
                )
6✔
2253

2254
        // If the remote commitment confirmed, then we'll grab all the chain
2255
        // actions for the remote commit, and check the pending commit for any
2256
        // HTLCS we need to handle immediately (dust).
2257
        case RemoteHtlcSet:
6✔
2258
                return c.checkRemoteChainActions(
6✔
2259
                        height, trigger, htlcSets, false,
6✔
2260
                )
6✔
2261

2262
        // Otherwise, the remote pending commitment confirmed, so we'll examine
2263
        // the HTLCs on that unrevoked dangling commitment.
2264
        case RemotePendingHtlcSet:
2✔
2265
                return c.checkRemoteChainActions(
2✔
2266
                        height, trigger, htlcSets, true,
2✔
2267
                )
2✔
2268
        }
2269

2270
        return nil, fmt.Errorf("unable to locate chain actions")
×
2271
}
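
// constructChainActions above has two paths: with no confirmed CommitSet on
// disk (older nodes) it replays the persisted ChainActionMap, otherwise it
// dispatches on which commitment actually confirmed. The following is a
// minimal, self-contained sketch of that fallback-then-dispatch shape; the
// string keys and function parameters are illustrative stand-ins, not the
// real HtlcSetKey or arbitrator log types.
func exampleChainActionDispatch(confKey *string,
        loadPersisted func() (map[string][]string, error),
        checkLocal, checkRemote func() (map[string][]string, error)) (
        map[string][]string, error) {

        // Older close data has no confirmed commitment key recorded, so the
        // only option is to replay whatever actions were persisted at close
        // time.
        if confKey == nil {
                return loadPersisted()
        }

        switch *confKey {
        case "local":
                return checkLocal()

        case "remote", "remote-pending":
                return checkRemote()
        }

        return nil, fmt.Errorf("unable to locate chain actions")
}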
2272

2273
// prepContractResolutions is called either in the case that we decide we need
2274
// to go to chain, or the remote party goes to chain. Given a set of actions we
2275
// need to take for each HTLC, this method will return a set of contract
2276
// resolvers that will resolve the contracts on-chain if needed, and also a set
2277
// of packets to send to the htlcswitch in order to ensure all incoming HTLC's
2278
// are properly resolved.
2279
func (c *ChannelArbitrator) prepContractResolutions(
2280
        contractResolutions *ContractResolutions, height uint32,
2281
        htlcActions ChainActionMap) ([]ContractResolver, error) {
12✔
2282

12✔
2283
        // We'll also fetch the historical state of this channel, as it should
12✔
2284
        // have been marked as closed by now, and supplement it to each resolver
12✔
2285
        // such that we can properly resolve our pending contracts.
12✔
2286
        var chanState *channeldb.OpenChannel
12✔
2287
        chanState, err := c.cfg.FetchHistoricalChannel()
12✔
2288
        switch {
12✔
2289
        // If we don't find this channel, then it may be the case that it
2290
        // was closed before we started to retain the final state
2291
        // information for open channels.
2292
        case err == channeldb.ErrNoHistoricalBucket:
×
2293
                fallthrough
×
2294
        case err == channeldb.ErrChannelNotFound:
×
2295
                log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
×
2296
                        "state", c.cfg.ChanPoint)
×
2297

2298
        case err != nil:
×
2299
                return nil, err
×
2300
        }
2301

2302
        incomingResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
12✔
2303
        outgoingResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
12✔
2304

12✔
2305
        // We'll use these two maps to quickly look up an active HTLC with its
12✔
2306
        // matching HTLC resolution.
12✔
2307
        outResolutionMap := make(map[wire.OutPoint]lnwallet.OutgoingHtlcResolution)
12✔
2308
        inResolutionMap := make(map[wire.OutPoint]lnwallet.IncomingHtlcResolution)
12✔
2309
        for i := 0; i < len(incomingResolutions); i++ {
12✔
UNCOV
2310
                inRes := incomingResolutions[i]
×
UNCOV
2311
                inResolutionMap[inRes.HtlcPoint()] = inRes
×
UNCOV
2312
        }
×
2313
        for i := 0; i < len(outgoingResolutions); i++ {
13✔
2314
                outRes := outgoingResolutions[i]
1✔
2315
                outResolutionMap[outRes.HtlcPoint()] = outRes
1✔
2316
        }
1✔
2317

2318
        // We'll create the resolver kit that we'll be cloning for each
2319
        // resolver so they each can do their duty.
2320
        resolverCfg := ResolverConfig{
12✔
2321
                ChannelArbitratorConfig: c.cfg,
12✔
2322
                Checkpoint: func(res ContractResolver,
12✔
2323
                        reports ...*channeldb.ResolverReport) error {
14✔
2324

2✔
2325
                        return c.log.InsertUnresolvedContracts(reports, res)
2✔
2326
                },
2✔
2327
        }
2328

2329
        commitHash := contractResolutions.CommitHash
12✔
2330

12✔
2331
        var htlcResolvers []ContractResolver
12✔
2332

12✔
2333
        // We instantiate an anchor resolver if the commitment tx has an
12✔
2334
        // anchor.
12✔
2335
        if contractResolutions.AnchorResolution != nil {
14✔
2336
                anchorResolver := newAnchorResolver(
2✔
2337
                        contractResolutions.AnchorResolution.AnchorSignDescriptor,
2✔
2338
                        contractResolutions.AnchorResolution.CommitAnchor,
2✔
2339
                        height, c.cfg.ChanPoint, resolverCfg,
2✔
2340
                )
2✔
2341
                anchorResolver.SupplementState(chanState)
2✔
2342

2✔
2343
                htlcResolvers = append(htlcResolvers, anchorResolver)
2✔
2344
        }
2✔
2345

2346
        // If this is a breach close, we'll create a breach resolver, determine
2347
        // the htlc's to fail back, and exit. This is done because the other
2348
        // steps taken for non-breach-closes do not matter for breach-closes.
2349
        if contractResolutions.BreachResolution != nil {
14✔
2350
                breachResolver := newBreachResolver(resolverCfg)
2✔
2351
                htlcResolvers = append(htlcResolvers, breachResolver)
2✔
2352

2✔
2353
                return htlcResolvers, nil
2✔
2354
        }
2✔
2355

2356
        // For each HTLC, we'll either act immediately, meaning we'll instantly
2357
        // fail the HTLC, or we'll act only once the transaction has been
2358
        // confirmed, in which case we'll need an HTLC resolver.
2359
        for htlcAction, htlcs := range htlcActions {
21✔
2360
                switch htlcAction {
11✔
2361
                // If we can claim this HTLC, we'll create an HTLC resolver to
2362
                // claim the HTLC (second-level or directly), then add the pre
2363
                case HtlcClaimAction:
×
2364
                        for _, htlc := range htlcs {
×
2365
                                htlc := htlc
×
2366

×
2367
                                htlcOp := wire.OutPoint{
×
2368
                                        Hash:  commitHash,
×
2369
                                        Index: uint32(htlc.OutputIndex),
×
2370
                                }
×
2371

×
2372
                                resolution, ok := inResolutionMap[htlcOp]
×
2373
                                if !ok {
×
2374
                                        // TODO(roasbeef): panic?
×
2375
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2376
                                                "incoming resolution: %v",
×
2377
                                                c.cfg.ChanPoint, htlcOp)
×
2378
                                        continue
×
2379
                                }
2380

2381
                                resolver := newSuccessResolver(
×
2382
                                        resolution, height, htlc, resolverCfg,
×
2383
                                )
×
2384
                                if chanState != nil {
×
2385
                                        resolver.SupplementState(chanState)
×
2386
                                }
×
2387
                                htlcResolvers = append(htlcResolvers, resolver)
×
2388
                        }
2389

2390
                // If we can timeout the HTLC directly, then we'll create the
2391
                // proper resolver to do so, who will then cancel the packet
2392
                // backwards.
UNCOV
2393
                case HtlcTimeoutAction:
×
UNCOV
2394
                        for _, htlc := range htlcs {
×
UNCOV
2395
                                htlc := htlc
×
UNCOV
2396

×
UNCOV
2397
                                htlcOp := wire.OutPoint{
×
UNCOV
2398
                                        Hash:  commitHash,
×
UNCOV
2399
                                        Index: uint32(htlc.OutputIndex),
×
UNCOV
2400
                                }
×
UNCOV
2401

×
UNCOV
2402
                                resolution, ok := outResolutionMap[htlcOp]
×
UNCOV
2403
                                if !ok {
×
2404
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2405
                                                "outgoing resolution: %v", c.cfg.ChanPoint, htlcOp)
×
2406
                                        continue
×
2407
                                }
2408

UNCOV
2409
                                resolver := newTimeoutResolver(
×
UNCOV
2410
                                        resolution, height, htlc, resolverCfg,
×
UNCOV
2411
                                )
×
UNCOV
2412
                                if chanState != nil {
×
UNCOV
2413
                                        resolver.SupplementState(chanState)
×
UNCOV
2414
                                }
×
2415

2416
                                // For outgoing HTLCs, we will also need to
2417
                                // supplement the resolver with the expiry
2418
                                // block height of its corresponding incoming
2419
                                // HTLC.
UNCOV
2420
                                deadline := c.cfg.FindOutgoingHTLCDeadline(htlc)
×
UNCOV
2421
                                resolver.SupplementDeadline(deadline)
×
UNCOV
2422

×
UNCOV
2423
                                htlcResolvers = append(htlcResolvers, resolver)
×
2424
                        }
2425

2426
                // If this is an incoming HTLC, but we can't act yet, then
2427
                // we'll create an incoming resolver to redeem the HTLC if we
2428
                // learn of the pre-image, or let the remote party time out.
UNCOV
2429
                case HtlcIncomingWatchAction:
×
UNCOV
2430
                        for _, htlc := range htlcs {
×
UNCOV
2431
                                htlc := htlc
×
UNCOV
2432

×
UNCOV
2433
                                htlcOp := wire.OutPoint{
×
UNCOV
2434
                                        Hash:  commitHash,
×
UNCOV
2435
                                        Index: uint32(htlc.OutputIndex),
×
UNCOV
2436
                                }
×
UNCOV
2437

×
UNCOV
2438
                                // TODO(roasbeef): need to handle incoming dust...
×
UNCOV
2439

×
UNCOV
2440
                                // TODO(roasbeef): can't be negative!!!
×
UNCOV
2441
                                resolution, ok := inResolutionMap[htlcOp]
×
UNCOV
2442
                                if !ok {
×
2443
                                        log.Errorf("ChannelArbitrator(%v) unable to find "+
×
2444
                                                "incoming resolution: %v",
×
2445
                                                c.cfg.ChanPoint, htlcOp)
×
2446
                                        continue
×
2447
                                }
2448

UNCOV
2449
                                resolver := newIncomingContestResolver(
×
UNCOV
2450
                                        resolution, height, htlc,
×
UNCOV
2451
                                        resolverCfg,
×
UNCOV
2452
                                )
×
UNCOV
2453
                                if chanState != nil {
×
UNCOV
2454
                                        resolver.SupplementState(chanState)
×
UNCOV
2455
                                }
×
UNCOV
2456
                                htlcResolvers = append(htlcResolvers, resolver)
×
2457
                        }
2458

2459
                // Finally, if this is an outgoing HTLC we've sent, then we'll
2460
                // launch a resolver to watch for the pre-image (and settle
2461
                // backwards), or just timeout.
2462
                case HtlcOutgoingWatchAction:
1✔
2463
                        for _, htlc := range htlcs {
2✔
2464
                                htlc := htlc
1✔
2465

1✔
2466
                                htlcOp := wire.OutPoint{
1✔
2467
                                        Hash:  commitHash,
1✔
2468
                                        Index: uint32(htlc.OutputIndex),
1✔
2469
                                }
1✔
2470

1✔
2471
                                resolution, ok := outResolutionMap[htlcOp]
1✔
2472
                                if !ok {
1✔
2473
                                        log.Errorf("ChannelArbitrator(%v) "+
×
2474
                                                "unable to find outgoing "+
×
2475
                                                "resolution: %v",
×
2476
                                                c.cfg.ChanPoint, htlcOp)
×
2477

×
2478
                                        continue
×
2479
                                }
2480

2481
                                resolver := newOutgoingContestResolver(
1✔
2482
                                        resolution, height, htlc, resolverCfg,
1✔
2483
                                )
1✔
2484
                                if chanState != nil {
2✔
2485
                                        resolver.SupplementState(chanState)
1✔
2486
                                }
1✔
2487

2488
                                // For outgoing HTLCs, we will also need to
2489
                                // supplement the resolver with the expiry
2490
                                // block height of its corresponding incoming
2491
                                // HTLC.
2492
                                deadline := c.cfg.FindOutgoingHTLCDeadline(htlc)
1✔
2493
                                resolver.SupplementDeadline(deadline)
1✔
2494

1✔
2495
                                htlcResolvers = append(htlcResolvers, resolver)
1✔
2496
                        }
2497
                }
2498
        }
2499

2500
        // If this was a unilateral closure, then we'll also create a
2501
        // resolver to sweep our commitment output (but only if it wasn't
2502
        // trimmed).
2503
        if contractResolutions.CommitResolution != nil {
10✔
UNCOV
2504
                resolver := newCommitSweepResolver(
×
UNCOV
2505
                        *contractResolutions.CommitResolution, height,
×
UNCOV
2506
                        c.cfg.ChanPoint, resolverCfg,
×
UNCOV
2507
                )
×
UNCOV
2508
                if chanState != nil {
×
UNCOV
2509
                        resolver.SupplementState(chanState)
×
UNCOV
2510
                }
×
UNCOV
2511
                htlcResolvers = append(htlcResolvers, resolver)
×
2512
        }
2513

2514
        return htlcResolvers, nil
10✔
2515
}
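
// prepContractResolutions above matches each HTLC from the chain action map
// to its on-chain resolution by the outpoint it spends on the confirmed
// commitment (commit txid plus output index). The following is a minimal,
// self-contained sketch of that lookup pattern; the htlcResolution type and
// the values are illustrative stand-ins, not the lnwallet resolution types.
func exampleResolutionLookup() {
        type htlcResolution struct {
                point wire.OutPoint
        }

        commitHash := chainhash.Hash{0x01}
        resolutions := []htlcResolution{
                {point: wire.OutPoint{Hash: commitHash, Index: 0}},
                {point: wire.OutPoint{Hash: commitHash, Index: 2}},
        }

        // Index the resolutions by the outpoint they resolve so each HTLC
        // can be matched in constant time.
        resMap := make(map[wire.OutPoint]htlcResolution, len(resolutions))
        for _, res := range resolutions {
                resMap[res.point] = res
        }

        // For a given HTLC output index on the commitment, reconstruct the
        // outpoint and look up the matching resolution. Missing entries are
        // logged and skipped above rather than treated as fatal.
        htlcOp := wire.OutPoint{Hash: commitHash, Index: 1}
        if _, ok := resMap[htlcOp]; !ok {
                fmt.Printf("no resolution found for %v\n", htlcOp)
        }
}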
2516

2517
// replaceResolver replaces a resolver in the list of active resolvers. If the resolver
2518
// to be replaced is not found, it returns an error.
2519
func (c *ChannelArbitrator) replaceResolver(oldResolver,
2520
        newResolver ContractResolver) error {
1✔
2521

1✔
2522
        c.activeResolversLock.Lock()
1✔
2523
        defer c.activeResolversLock.Unlock()
1✔
2524

1✔
2525
        oldKey := oldResolver.ResolverKey()
1✔
2526
        for i, r := range c.activeResolvers {
2✔
2527
                if bytes.Equal(r.ResolverKey(), oldKey) {
2✔
2528
                        c.activeResolvers[i] = newResolver
1✔
2529
                        return nil
1✔
2530
                }
1✔
2531
        }
2532

2533
        return errors.New("resolver to be replaced not found")
×
2534
}
2535

2536
// resolveContract is a goroutine tasked with fully resolving an unresolved
2537
// contract. Either the initial contract will be resolved after a single step,
2538
// or the contract will itself create another contract to be resolved. In
2539
// either case, once the contract has been fully resolved, we'll signal back to
2540
// the main goroutine so it can properly keep track of the set of unresolved
2541
// contracts.
2542
//
2543
// NOTE: This MUST be run as a goroutine.
2544
func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver,
2545
        immediate bool) {
6✔
2546

6✔
2547
        defer c.wg.Done()
6✔
2548

6✔
2549
        log.Debugf("ChannelArbitrator(%v): attempting to resolve %T",
6✔
2550
                c.cfg.ChanPoint, currentContract)
6✔
2551

6✔
2552
        // Until the contract is fully resolved, we'll continue to iteratively
6✔
2553
        // resolve the contract one step at a time.
6✔
2554
        for !currentContract.IsResolved() {
13✔
2555
                log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved",
7✔
2556
                        c.cfg.ChanPoint, currentContract)
7✔
2557

7✔
2558
                select {
7✔
2559

2560
                // If we've been signalled to quit, then we'll exit early.
2561
                case <-c.quit:
×
2562
                        return
×
2563

2564
                default:
7✔
2565
                        // Otherwise, we'll attempt to resolve the current
7✔
2566
                        // contract.
7✔
2567
                        nextContract, err := currentContract.Resolve(immediate)
7✔
2568
                        if err != nil {
8✔
2569
                                if err == errResolverShuttingDown {
1✔
UNCOV
2570
                                        return
×
UNCOV
2571
                                }
×
2572

2573
                                log.Errorf("ChannelArbitrator(%v): unable to "+
1✔
2574
                                        "progress %T: %v",
1✔
2575
                                        c.cfg.ChanPoint, currentContract, err)
1✔
2576
                                return
1✔
2577
                        }
2578

2579
                        switch {
6✔
2580
                        // If this contract produced another, then this means
2581
                        // the current contract was only able to be partially
2582
                        // resolved in this step. So we'll do a contract swap
2583
                        // within our logs: the new contract will take the
2584
                        // place of the old one.
2585
                        case nextContract != nil:
1✔
2586
                                log.Debugf("ChannelArbitrator(%v): swapping "+
1✔
2587
                                        "out contract %T for %T ",
1✔
2588
                                        c.cfg.ChanPoint, currentContract,
1✔
2589
                                        nextContract)
1✔
2590

1✔
2591
                                // Swap contract in log.
1✔
2592
                                err := c.log.SwapContract(
1✔
2593
                                        currentContract, nextContract,
1✔
2594
                                )
1✔
2595
                                if err != nil {
1✔
2596
                                        log.Errorf("unable to add recurse "+
×
2597
                                                "contract: %v", err)
×
2598
                                }
×
2599

2600
                                // Swap contract in resolvers list. This is to
2601
                                // make sure that reports are queried from the
2602
                                // new resolver.
2603
                                err = c.replaceResolver(
1✔
2604
                                        currentContract, nextContract,
1✔
2605
                                )
1✔
2606
                                if err != nil {
1✔
2607
                                        log.Errorf("unable to replace "+
×
2608
                                                "contract: %v", err)
×
2609
                                }
×
2610

2611
                                // As this contract produced another, we'll
2612
                                // re-assign, so we can continue our resolution
2613
                                // loop.
2614
                                currentContract = nextContract
1✔
2615

2616
                        // If this contract is actually fully resolved, then
2617
                        // we'll mark it as such within the database.
2618
                        case currentContract.IsResolved():
5✔
2619
                                log.Debugf("ChannelArbitrator(%v): marking "+
5✔
2620
                                        "contract %T fully resolved",
5✔
2621
                                        c.cfg.ChanPoint, currentContract)
5✔
2622

5✔
2623
                                err := c.log.ResolveContract(currentContract)
5✔
2624
                                if err != nil {
5✔
2625
                                        log.Errorf("unable to resolve contract: %v",
×
2626
                                                err)
×
2627
                                }
×
2628

2629
                                // Now that the contract has been resolved,
2630
                                // we'll signal to the main goroutine.
2631
                                select {
5✔
2632
                                case c.resolutionSignal <- struct{}{}:
4✔
2633
                                case <-c.quit:
1✔
2634
                                        return
1✔
2635
                                }
2636
                        }
2637

2638
                }
2639
        }
2640
}
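
// resolveContract above is a step-wise loop: each Resolve call either
// finishes the contract or hands back a follow-up contract that replaces the
// current one (both in the log and in the active resolver list). The
// following is a minimal, self-contained sketch of that loop shape;
// stepResolver and the quit channel are illustrative, not the
// ContractResolver interface.
type stepResolver interface {
        Resolve() (stepResolver, error)
        IsResolved() bool
}

func runResolverLoop(current stepResolver, quit <-chan struct{}) error {
        for !current.IsResolved() {
                // Bail out early if we've been told to shut down.
                select {
                case <-quit:
                        return nil
                default:
                }

                next, err := current.Resolve()
                if err != nil {
                        return err
                }

                // A non-nil follow-up contract takes the place of the one we
                // just stepped; otherwise we loop until IsResolved reports
                // true.
                if next != nil {
                        current = next
                }
        }

        return nil
}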
2641

2642
// signalUpdateMsg is a struct that carries fresh signals to the
2643
// ChannelArbitrator. We need to receive a message like this each time the
2644
// channel becomes active, as its internal state may change.
2645
type signalUpdateMsg struct {
2646
        // newSignals is the set of new active signals to be sent to the
2647
        // arbitrator.
2648
        newSignals *ContractSignals
2649

2650
        // doneChan is a channel that will be closed once the arbitrator has
2651
        // attached the new signals.
2652
        doneChan chan struct{}
2653
}
2654

2655
// UpdateContractSignals updates the set of signals the ChannelArbitrator needs
2656
// to receive from a channel in real-time in order to keep in sync with the
2657
// latest state of the contract.
2658
func (c *ChannelArbitrator) UpdateContractSignals(newSignals *ContractSignals) {
11✔
2659
        done := make(chan struct{})
11✔
2660

11✔
2661
        select {
11✔
2662
        case c.signalUpdates <- &signalUpdateMsg{
2663
                newSignals: newSignals,
2664
                doneChan:   done,
2665
        }:
11✔
2666
        case <-c.quit:
×
2667
        }
2668

2669
        select {
11✔
2670
        case <-done:
11✔
2671
        case <-c.quit:
×
2672
        }
2673
}
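
// UpdateContractSignals above uses a small request/acknowledge handshake:
// the caller sends a message carrying a done channel, and then blocks until
// the arbitrator closes that channel (or the arbitrator quits). The
// following is a self-contained sketch of the handshake, with illustrative
// names rather than the real signal types.
type ackedUpdate struct {
        payload int
        done    chan struct{}
}

func sendAckedUpdate(updates chan<- *ackedUpdate, quit <-chan struct{},
        payload int) {

        done := make(chan struct{})

        // Hand the update to the processing goroutine, unless we're
        // shutting down.
        select {
        case updates <- &ackedUpdate{payload: payload, done: done}:
        case <-quit:
                return
        }

        // Wait for the processing goroutine to close done, signalling that
        // the update has been applied.
        select {
        case <-done:
        case <-quit:
        }
}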
2674

2675
// notifyContractUpdate updates the ChannelArbitrator's unmerged mappings such
2676
// that they can later be merged with activeHTLCs when calling
2677
// checkLocalChainActions or sweepAnchors. These are the only two places that
2678
// activeHTLCs is used.
2679
func (c *ChannelArbitrator) notifyContractUpdate(upd *ContractUpdate) {
12✔
2680
        c.unmergedMtx.Lock()
12✔
2681
        defer c.unmergedMtx.Unlock()
12✔
2682

12✔
2683
        // Update the mapping.
12✔
2684
        c.unmergedSet[upd.HtlcKey] = newHtlcSet(upd.Htlcs)
12✔
2685

12✔
2686
        log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v",
12✔
2687
                c.cfg.ChanPoint, lnutils.SpewLogClosure(upd))
12✔
2688
}
12✔
2689

2690
// updateActiveHTLCs merges the unmerged set of HTLCs from the link with
2691
// activeHTLCs.
2692
func (c *ChannelArbitrator) updateActiveHTLCs() {
74✔
2693
        c.unmergedMtx.RLock()
74✔
2694
        defer c.unmergedMtx.RUnlock()
74✔
2695

74✔
2696
        // Update the mapping.
74✔
2697
        c.activeHTLCs[LocalHtlcSet] = c.unmergedSet[LocalHtlcSet]
74✔
2698
        c.activeHTLCs[RemoteHtlcSet] = c.unmergedSet[RemoteHtlcSet]
74✔
2699

74✔
2700
        // If the pending set exists, update that as well.
74✔
2701
        if _, ok := c.unmergedSet[RemotePendingHtlcSet]; ok {
83✔
2702
                pendingSet := c.unmergedSet[RemotePendingHtlcSet]
9✔
2703
                c.activeHTLCs[RemotePendingHtlcSet] = pendingSet
9✔
2704
        }
9✔
2705
}
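
// notifyContractUpdate and updateActiveHTLCs above implement a staging
// pattern: link updates land in an "unmerged" map under a mutex, and are
// only folded into the working set when the arbitrator's own goroutine next
// needs them. The following is a small, self-contained sketch of that
// pattern; the stagedState type and string/int keys are illustrative
// stand-ins for the HtlcSetKey-indexed maps used above.
type stagedState struct {
        mtx      sync.RWMutex
        unmerged map[string]int
}

// stage records an update from a producer goroutine.
func (s *stagedState) stage(key string, val int) {
        s.mtx.Lock()
        defer s.mtx.Unlock()

        s.unmerged[key] = val
}

// snapshot copies the staged entries into a fresh view that the single
// consumer goroutine can then use without further locking.
func (s *stagedState) snapshot() map[string]int {
        s.mtx.RLock()
        defer s.mtx.RUnlock()

        out := make(map[string]int, len(s.unmerged))
        for k, v := range s.unmerged {
                out[k] = v
        }

        return out
}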
2706

2707
// channelAttendant is the primary goroutine that acts as the judicial
2708
// arbitrator between our channel state, the remote channel peer, and the
2709
// blockchain (Our judge). This goroutine will ensure that we faithfully execute
2710
// all clauses of our contract in the case that we need to go on-chain for a
2711
// dispute. Currently, two such conditions warrant our intervention: when an
2712
// outgoing HTLC is about to timeout, and when we know the pre-image for an
2713
// incoming HTLC, but it hasn't yet been settled off-chain. In these cases,
2714
// we'll: broadcast our commitment, cancel/settle any HTLC's backwards after
2715
// sufficient confirmation, and finally send our set of outputs to the UTXO
2716
// Nursery for incubation, and ultimate sweeping.
2717
//
2718
// NOTE: This MUST be run as a goroutine.
2719
func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
47✔
2720

47✔
2721
        // TODO(roasbeef): tell top chain arb we're done
47✔
2722
        defer func() {
91✔
2723
                c.wg.Done()
44✔
2724
        }()
44✔
2725

2726
        for {
152✔
2727
                select {
105✔
2728

2729
                // A new block has arrived, we'll examine all the active HTLC's
2730
                // to see if any of them have expired, and also update our
2731
                // view of the best current height.
2732
                case blockHeight, ok := <-c.blocks:
21✔
2733
                        if !ok {
27✔
2734
                                return
6✔
2735
                        }
6✔
2736
                        bestHeight = blockHeight
15✔
2737

15✔
2738
                        // If we're not in the default state, then we can
15✔
2739
                        // ignore this signal as we're waiting for contract
15✔
2740
                        // resolution.
15✔
2741
                        if c.state != StateDefault {
24✔
2742
                                continue
9✔
2743
                        }
2744

2745
                        // Now that a new block has arrived, we'll attempt to
2746
                        // advance our state forward.
2747
                        nextState, _, err := c.advanceState(
6✔
2748
                                uint32(bestHeight), chainTrigger, nil,
6✔
2749
                        )
6✔
2750
                        if err != nil {
6✔
2751
                                log.Errorf("Unable to advance state: %v", err)
×
2752
                        }
×
2753

2754
                        // If as a result of this trigger, the contract is
2755
                        // fully resolved, then well exit.
2756
                        if nextState == StateFullyResolved {
6✔
2757
                                return
×
2758
                        }
×
2759

2760
                // A new signal update was just sent. This indicates that the
2761
                // channel under watch is now live, and may modify its internal
2762
                // state, so we'll get the most up to date signals so we can
2763
                // properly do our job.
2764
                case signalUpdate := <-c.signalUpdates:
11✔
2765
                        log.Tracef("ChannelArbitrator(%v): got new signal "+
11✔
2766
                                "update!", c.cfg.ChanPoint)
11✔
2767

11✔
2768
                        // We'll update the ShortChannelID.
11✔
2769
                        c.cfg.ShortChanID = signalUpdate.newSignals.ShortChanID
11✔
2770

11✔
2771
                        // Now that the signal has been updated, we'll now
11✔
2772
                        // close the done channel to signal to the caller we've
11✔
2773
                        // registered the new ShortChannelID.
11✔
2774
                        close(signalUpdate.doneChan)
11✔
2775

2776
                // We've cooperatively closed the channel, so we're no longer
2777
                // needed. We'll mark the channel as resolved and exit.
2778
                case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
2✔
2779
                        log.Infof("ChannelArbitrator(%v) marking channel "+
2✔
2780
                                "cooperatively closed", c.cfg.ChanPoint)
2✔
2781

2✔
2782
                        err := c.cfg.MarkChannelClosed(
2✔
2783
                                closeInfo.ChannelCloseSummary,
2✔
2784
                                channeldb.ChanStatusCoopBroadcasted,
2✔
2785
                        )
2✔
2786
                        if err != nil {
2✔
2787
                                log.Errorf("Unable to mark channel closed: "+
×
2788
                                        "%v", err)
×
2789
                                return
×
2790
                        }
×
2791

2792
                        // We'll now advance our state machine until it reaches
2793
                        // a terminal state, and the channel is marked resolved.
2794
                        _, _, err = c.advanceState(
2✔
2795
                                closeInfo.CloseHeight, coopCloseTrigger, nil,
2✔
2796
                        )
2✔
2797
                        if err != nil {
3✔
2798
                                log.Errorf("Unable to advance state: %v", err)
1✔
2799
                                return
1✔
2800
                        }
1✔
2801

2802
                // We have broadcasted our commitment, and it is now confirmed
2803
                // on-chain.
2804
                case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
12✔
2805
                        log.Infof("ChannelArbitrator(%v): local on-chain "+
12✔
2806
                                "channel close", c.cfg.ChanPoint)
12✔
2807

12✔
2808
                        if c.state != StateCommitmentBroadcasted {
13✔
2809
                                log.Errorf("ChannelArbitrator(%v): unexpected "+
1✔
2810
                                        "local on-chain channel close",
1✔
2811
                                        c.cfg.ChanPoint)
1✔
2812
                        }
1✔
2813
                        closeTx := closeInfo.CloseTx
12✔
2814

12✔
2815
                        resolutions, err := closeInfo.ContractResolutions.
12✔
2816
                                UnwrapOrErr(
12✔
2817
                                        fmt.Errorf("resolutions not found"),
12✔
2818
                                )
12✔
2819
                        if err != nil {
12✔
2820
                                log.Errorf("ChannelArbitrator(%v): unable to "+
×
2821
                                        "get resolutions: %v", c.cfg.ChanPoint,
×
2822
                                        err)
×
2823

×
2824
                                return
×
2825
                        }
×
2826

2827
                        // We make sure that the htlc resolutions are present
2828
                        // otherwise we would panic dereferencing the pointer.
2829
                        //
2830
                        // TODO(ziggie): Refactor ContractResolutions to use
2831
                        // options.
2832
                        if resolutions.HtlcResolutions == nil {
12✔
2833
                                log.Errorf("ChannelArbitrator(%v): htlc "+
×
2834
                                        "resolutions not found",
×
2835
                                        c.cfg.ChanPoint)
×
2836

×
2837
                                return
×
2838
                        }
×
2839

2840
                        contractRes := &ContractResolutions{
12✔
2841
                                CommitHash:       closeTx.TxHash(),
12✔
2842
                                CommitResolution: resolutions.CommitResolution,
12✔
2843
                                HtlcResolutions:  *resolutions.HtlcResolutions,
12✔
2844
                                AnchorResolution: resolutions.AnchorResolution,
12✔
2845
                        }
12✔
2846

12✔
2847
                        // When processing a unilateral close event, we'll
12✔
2848
                        // transition to the ContractClosed state. We'll log
12✔
2849
                        // out the set of resolutions such that they are
12✔
2850
                        // available to fetch in that state, we'll also write
12✔
2851
                        // the commit set so we can reconstruct our chain
12✔
2852
                        // actions on restart.
12✔
2853
                        err = c.log.LogContractResolutions(contractRes)
12✔
2854
                        if err != nil {
12✔
2855
                                log.Errorf("Unable to write resolutions: %v",
×
2856
                                        err)
×
2857
                                return
×
2858
                        }
×
2859
                        err = c.log.InsertConfirmedCommitSet(
12✔
2860
                                &closeInfo.CommitSet,
12✔
2861
                        )
12✔
2862
                        if err != nil {
12✔
2863
                                log.Errorf("Unable to write commit set: %v",
×
2864
                                        err)
×
2865
                                return
×
2866
                        }
×
2867

2868
                        // After the set of resolutions are successfully
2869
                        // logged, we can safely close the channel. After this
2870
                        // succeeds we won't be getting chain events anymore,
2871
                        // so we must make sure we can recover on restart after
2872
                        // it is marked closed. If the next state transition
2873
                        // fails, we'll start up in the prior state again, and
2874
                        // we won't be longer getting chain events. In this
2875
                        // case we must manually re-trigger the state
2876
                        // transition into StateContractClosed based on the
2877
                        // close status of the channel.
2878
                        err = c.cfg.MarkChannelClosed(
12✔
2879
                                closeInfo.ChannelCloseSummary,
12✔
2880
                                channeldb.ChanStatusLocalCloseInitiator,
12✔
2881
                        )
12✔
2882
                        if err != nil {
12✔
2883
                                log.Errorf("Unable to mark "+
×
2884
                                        "channel closed: %v", err)
×
2885
                                return
×
2886
                        }
×
2887

2888
                        // We'll now advance our state machine until it reaches
2889
                        // a terminal state.
2890
                        _, _, err = c.advanceState(
12✔
2891
                                uint32(closeInfo.SpendingHeight),
12✔
2892
                                localCloseTrigger, &closeInfo.CommitSet,
12✔
2893
                        )
12✔
2894
                        if err != nil {
13✔
2895
                                log.Errorf("Unable to advance state: %v", err)
1✔
2896
                        }
1✔
2897

2898
                // The remote party has broadcast the commitment on-chain.
2899
                // We'll examine our state to determine if we need to act at
2900
                // all.
2901
                case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
8✔
2902
                        log.Infof("ChannelArbitrator(%v): remote party has "+
8✔
2903
                                "closed channel out on-chain", c.cfg.ChanPoint)
8✔
2904

8✔
2905
                        // If we don't have a self output, and there are no
8✔
2906
                        // active HTLC's, then we can immediately mark the
8✔
2907
                        // contract as fully resolved and exit.
8✔
2908
                        contractRes := &ContractResolutions{
8✔
2909
                                CommitHash:       *uniClosure.SpenderTxHash,
8✔
2910
                                CommitResolution: uniClosure.CommitResolution,
8✔
2911
                                HtlcResolutions:  *uniClosure.HtlcResolutions,
8✔
2912
                                AnchorResolution: uniClosure.AnchorResolution,
8✔
2913
                        }
8✔
2914

8✔
2915
                        // When processing a unilateral close event, we'll
8✔
2916
                        // transition to the ContractClosed state. We'll log
8✔
2917
                        // out the set of resolutions such that they are
8✔
2918
                        // available to fetch in that state, we'll also write
8✔
2919
                        // the commit set so we can reconstruct our chain
8✔
2920
                        // actions on restart.
8✔
2921
                        err := c.log.LogContractResolutions(contractRes)
8✔
2922
                        if err != nil {
9✔
2923
                                log.Errorf("Unable to write resolutions: %v",
1✔
2924
                                        err)
1✔
2925
                                return
1✔
2926
                        }
1✔
2927
                        err = c.log.InsertConfirmedCommitSet(
7✔
2928
                                &uniClosure.CommitSet,
7✔
2929
                        )
7✔
2930
                        if err != nil {
7✔
2931
                                log.Errorf("Unable to write commit set: %v",
×
2932
                                        err)
×
2933
                                return
×
2934
                        }
×
2935

2936
                        // After the set of resolutions are successfully
2937
                        // logged, we can safely close the channel. After this
2938
                        // succeeds we won't be getting chain events anymore,
2939
                        // so we must make sure we can recover on restart after
2940
                        // it is marked closed. If the next state transition
2941
                        // fails, we'll start up in the prior state again, and
2942
                        // we will no longer be getting chain events. In this
2943
                        // case we must manually re-trigger the state
2944
                        // transition into StateContractClosed based on the
2945
                        // close status of the channel.
2946
                        closeSummary := &uniClosure.ChannelCloseSummary
7✔
2947
                        err = c.cfg.MarkChannelClosed(
7✔
2948
                                closeSummary,
7✔
2949
                                channeldb.ChanStatusRemoteCloseInitiator,
7✔
2950
                        )
7✔
2951
                        if err != nil {
8✔
2952
                                log.Errorf("Unable to mark channel closed: %v",
1✔
2953
                                        err)
1✔
2954
                                return
1✔
2955
                        }
1✔
2956

2957
                        // We'll now advance our state machine until it reaches
2958
                        // a terminal state.
2959
                        _, _, err = c.advanceState(
6✔
2960
                                uint32(uniClosure.SpendingHeight),
6✔
2961
                                remoteCloseTrigger, &uniClosure.CommitSet,
6✔
2962
                        )
6✔
2963
                        if err != nil {
8✔
2964
                                log.Errorf("Unable to advance state: %v", err)
2✔
2965
                        }
2✔
2966

2967
                // The remote has breached the channel. As this is handled by
2968
                // the ChainWatcher and BreachArbitrator, we don't have to do
2969
                // anything in particular, so just advance our state and
2970
                // gracefully exit.
2971
                case breachInfo := <-c.cfg.ChainEvents.ContractBreach:
1✔
2972
                        log.Infof("ChannelArbitrator(%v): remote party has "+
1✔
2973
                                "breached channel!", c.cfg.ChanPoint)
1✔
2974

1✔
2975
                        // In the breach case, we'll only have anchor and
1✔
2976
                        // breach resolutions.
1✔
2977
                        contractRes := &ContractResolutions{
1✔
2978
                                CommitHash:       breachInfo.CommitHash,
1✔
2979
                                BreachResolution: breachInfo.BreachResolution,
1✔
2980
                                AnchorResolution: breachInfo.AnchorResolution,
1✔
2981
                        }
1✔
2982

1✔
2983
                        // We'll transition to the ContractClosed state and log
1✔
2984
                        // the set of resolutions such that they can be turned
1✔
2985
                        // into resolvers later on. We'll also insert the
1✔
2986
                        // CommitSet of the latest set of commitments.
1✔
2987
                        err := c.log.LogContractResolutions(contractRes)
1✔
2988
                        if err != nil {
1✔
2989
                                log.Errorf("Unable to write resolutions: %v",
×
2990
                                        err)
×
2991
                                return
×
2992
                        }
×
2993
                        err = c.log.InsertConfirmedCommitSet(
1✔
2994
                                &breachInfo.CommitSet,
1✔
2995
                        )
1✔
2996
                        if err != nil {
1✔
2997
                                log.Errorf("Unable to write commit set: %v",
×
2998
                                        err)
×
2999
                                return
×
3000
                        }
×
3001

3002
                        // The channel is finally marked pending closed here as
3003
                        // the BreachArbitrator and channel arbitrator have
3004
                        // persisted the relevant states.
3005
                        closeSummary := &breachInfo.CloseSummary
1✔
3006
                        err = c.cfg.MarkChannelClosed(
1✔
3007
                                closeSummary,
1✔
3008
                                channeldb.ChanStatusRemoteCloseInitiator,
1✔
3009
                        )
1✔
3010
                        if err != nil {
1✔
3011
                                log.Errorf("Unable to mark channel closed: %v",
×
3012
                                        err)
×
3013
                                return
×
3014
                        }
×
3015

3016
                        log.Infof("Breached channel=%v marked pending-closed",
1✔
3017
                                breachInfo.BreachResolution.FundingOutPoint)
1✔
3018

1✔
3019
                        // We'll advance our state machine until it reaches a
1✔
3020
                        // terminal state.
1✔
3021
                        _, _, err = c.advanceState(
1✔
3022
                                uint32(bestHeight), breachCloseTrigger,
1✔
3023
                                &breachInfo.CommitSet,
1✔
3024
                        )
1✔
3025
                        if err != nil {
1✔
3026
                                log.Errorf("Unable to advance state: %v", err)
×
3027
                        }
×
3028

3029
                // A new contract has just been resolved, we'll now check our
3030
                // log to see if all contracts have been resolved. If so, then
3031
                // we can exit as the contract is fully resolved.
3032
                case <-c.resolutionSignal:
4✔
3033
                        log.Infof("ChannelArbitrator(%v): a contract has been "+
4✔
3034
                                "fully resolved!", c.cfg.ChanPoint)
4✔
3035

4✔
3036
                        nextState, _, err := c.advanceState(
4✔
3037
                                uint32(bestHeight), chainTrigger, nil,
4✔
3038
                        )
4✔
3039
                        if err != nil {
4✔
3040
                                log.Errorf("Unable to advance state: %v", err)
×
3041
                        }
×
3042

3043
                        // If we don't have anything further to do after
3044
                        // advancing our state, then we'll exit.
3045
                        if nextState == StateFullyResolved {
7✔
3046
                                log.Infof("ChannelArbitrator(%v): all "+
3✔
3047
                                        "contracts fully resolved, exiting",
3✔
3048
                                        c.cfg.ChanPoint)
3✔
3049

3✔
3050
                                return
3✔
3051
                        }
3✔
3052

3053
                // We've just received a request to forcibly close out the
3054
                // channel. We'll advance our state machine to broadcast our commitment.
3055
                case closeReq := <-c.forceCloseReqs:
11✔
3056
                        log.Infof("ChannelArbitrator(%v): received force "+
11✔
3057
                                "close request", c.cfg.ChanPoint)
11✔
3058

11✔
3059
                        if c.state != StateDefault {
12✔
3060
                                select {
1✔
3061
                                case closeReq.closeTx <- nil:
1✔
3062
                                case <-c.quit:
×
3063
                                }
3064

3065
                                select {
1✔
3066
                                case closeReq.errResp <- errAlreadyForceClosed:
1✔
3067
                                case <-c.quit:
×
3068
                                }
3069

3070
                                continue
1✔
3071
                        }
3072

3073
                        nextState, closeTx, err := c.advanceState(
10✔
3074
                                uint32(bestHeight), userTrigger, nil,
10✔
3075
                        )
10✔
3076
                        if err != nil {
11✔
3077
                                log.Errorf("Unable to advance state: %v", err)
1✔
3078
                        }
1✔
3079

3080
                        select {
10✔
3081
                        case closeReq.closeTx <- closeTx:
10✔
3082
                        case <-c.quit:
×
3083
                                return
×
3084
                        }
3085

3086
                        select {
10✔
3087
                        case closeReq.errResp <- err:
10✔
3088
                        case <-c.quit:
×
3089
                                return
×
3090
                        }
3091

3092
                        // If we don't have anything further to do after
3093
                        // advancing our state, then we'll exit.
3094
                        if nextState == StateFullyResolved {
10✔
3095
                                log.Infof("ChannelArbitrator(%v): all "+
×
3096
                                        "contracts resolved, exiting",
×
3097
                                        c.cfg.ChanPoint)
×
3098
                                return
×
3099
                        }
×
3100

3101
                case <-c.quit:
32✔
3102
                        return
32✔
3103
                }
3104
        }
3105
}
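
// channelAttendant above is a single event loop: every external input
// (blocks, signal updates, close events, force-close requests) arrives on a
// channel, is handled in one select, and may drive the state machine to a
// terminal state that ends the loop. The following is a compact,
// self-contained sketch of that shape; the channel types and the advance
// callback are illustrative, not the arbitrator's real configuration.
func attendantLoop(blocks <-chan int32, requests <-chan string,
        quit <-chan struct{}, advance func(height int32) bool) {

        var bestHeight int32

        for {
                select {
                // New blocks update our view of the best height and may let
                // the state machine make progress.
                case height, ok := <-blocks:
                        if !ok {
                                return
                        }
                        bestHeight = height

                        if advance(bestHeight) {
                                return
                        }

                // Other requests are handled in the same loop so that all
                // state transitions happen from a single goroutine.
                case req := <-requests:
                        fmt.Printf("handling %q at height %d\n",
                                req, bestHeight)

                        if advance(bestHeight) {
                                return
                        }

                case <-quit:
                        return
                }
        }
}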
3106

3107
// checkLegacyBreach returns StateFullyResolved if the channel was closed with
3108
// a breach transaction before the channel arbitrator launched its own breach
3109
// resolver. StateContractClosed is returned if this is a modern breach close
3110
// with a breach resolver. StateError is returned if the log lookup failed.
3111
func (c *ChannelArbitrator) checkLegacyBreach() (ArbitratorState, error) {
2✔
3112
        // A previous version of the channel arbitrator would make the breach
2✔
3113
        // close skip to StateFullyResolved. If there are no contract
2✔
3114
        // resolutions in the bolt arbitrator log, then this is an older breach
2✔
3115
        // close. Otherwise, if there are resolutions, the state should advance
2✔
3116
        // to StateContractClosed.
2✔
3117
        _, err := c.log.FetchContractResolutions()
2✔
3118
        if err == errNoResolutions {
2✔
3119
                // This is an older breach close still in the database.
×
3120
                return StateFullyResolved, nil
×
3121
        } else if err != nil {
2✔
3122
                return StateError, err
×
3123
        }
×
3124

3125
        // This is a modern breach close with resolvers.
3126
        return StateContractClosed, nil
2✔
3127
}
3128

3129
// sweepRequest wraps the arguments used when calling `SweepInput`.
3130
type sweepRequest struct {
3131
        // input is the input to be swept.
3132
        input input.Input
3133

3134
        // params holds the sweeping parameters.
3135
        params sweep.Params
3136
}
3137

3138
// createSweepRequest creates an anchor sweeping request for a particular
3139
// version (local/remote/remote pending) of the commitment.
3140
func (c *ChannelArbitrator) createSweepRequest(
3141
        anchor *lnwallet.AnchorResolution, htlcs htlcSet, anchorPath string,
3142
        heightHint uint32) (sweepRequest, error) {
8✔
3143

8✔
3144
        // Use the chan id as the exclusive group. This prevents any of the
8✔
3145
        // anchors from being batched together.
8✔
3146
        exclusiveGroup := c.cfg.ShortChanID.ToUint64()
8✔
3147

8✔
3148
        // Find the deadline for this specific anchor.
8✔
3149
        deadline, value, err := c.findCommitmentDeadlineAndValue(
8✔
3150
                heightHint, htlcs,
8✔
3151
        )
8✔
3152
        if err != nil {
8✔
3153
                return sweepRequest{}, err
×
3154
        }
×
3155

3156
        // If we cannot find a deadline, it means there are no HTLCs at stake,
3157
        // which means we can relax our anchor sweeping conditions as we don't
3158
        // have any time sensitive outputs to sweep. However we need to
3159
        // register the anchor output with the sweeper so we are later able to
3160
        // bump the close fee.
3161
        if deadline.IsNone() {
11✔
3162
                log.Infof("ChannelArbitrator(%v): no HTLCs at stake, "+
3✔
3163
                        "sweeping anchor with default deadline",
3✔
3164
                        c.cfg.ChanPoint)
3✔
3165
        }
3✔
3166

3167
        witnessType := input.CommitmentAnchor
8✔
3168

8✔
3169
        // For taproot channels, we need to use the proper witness type.
8✔
3170
        if txscript.IsPayToTaproot(
8✔
3171
                anchor.AnchorSignDescriptor.Output.PkScript,
8✔
3172
        ) {
8✔
UNCOV
3173

×
UNCOV
3174
                witnessType = input.TaprootAnchorSweepSpend
×
UNCOV
3175
        }
×
3176

3177
        // Prepare anchor output for sweeping.
3178
        anchorInput := input.MakeBaseInput(
8✔
3179
                &anchor.CommitAnchor,
8✔
3180
                witnessType,
8✔
3181
                &anchor.AnchorSignDescriptor,
8✔
3182
                heightHint,
8✔
3183
                &input.TxInfo{
8✔
3184
                        Fee:    anchor.CommitFee,
8✔
3185
                        Weight: anchor.CommitWeight,
8✔
3186
                },
8✔
3187
        )
8✔
3188

8✔
3189
        // If we have a deadline, we'll use it to calculate the deadline
8✔
3190
        // height, otherwise default to none.
8✔
3191
        deadlineDesc := "None"
8✔
3192
        deadlineHeight := fn.MapOption(func(d int32) int32 {
13✔
3193
                deadlineDesc = fmt.Sprintf("%d", d)
5✔
3194

5✔
3195
                return d + int32(heightHint)
5✔
3196
        })(deadline)
5✔
3197

3198
        // Calculate the budget based on the value under protection, which is
3199
        // the sum of all HTLCs on this commitment subtracted by their budgets.
3200
        // The anchor output in itself has a small output value of 330 sats so
3201
        // we also include it in the budget to pay for the cpfp transaction.
3202
        budget := calculateBudget(
8✔
3203
                value, c.cfg.Budget.AnchorCPFPRatio, c.cfg.Budget.AnchorCPFP,
8✔
3204
        ) + AnchorOutputValue
8✔
3205

8✔
3206
        log.Infof("ChannelArbitrator(%v): offering anchor from %s commitment "+
8✔
3207
                "%v to sweeper with deadline=%v, budget=%v", c.cfg.ChanPoint,
8✔
3208
                anchorPath, anchor.CommitAnchor, deadlineDesc, budget)
8✔
3209

8✔
3210
        // Sweep anchor output with a confirmation target fee preference.
8✔
3211
        // Because this is a cpfp-operation, the anchor will only be attempted
8✔
3212
        // to sweep when the current fee estimate for the confirmation target
8✔
3213
        // exceeds the commit fee rate.
8✔
3214
        return sweepRequest{
8✔
3215
                input: &anchorInput,
8✔
3216
                params: sweep.Params{
8✔
3217
                        ExclusiveGroup: &exclusiveGroup,
8✔
3218
                        Budget:         budget,
8✔
3219
                        DeadlineHeight: deadlineHeight,
8✔
3220
                },
8✔
3221
        }, nil
8✔
3222
}
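
// createSweepRequest above derives the anchor CPFP budget from the HTLC
// value at stake: a configured ratio of that value (capped by a maximum, per
// calculateBudget elsewhere in this package) plus the 330-sat anchor output
// itself, and it turns the relative deadline into an absolute height by
// adding the height hint. The following is a small worked sketch of that
// arithmetic; the ratio, cap and other numbers are illustrative, not lnd
// defaults.
func exampleAnchorBudget() {
        var (
                valueAtStake = btcutil.Amount(1_000_000) // HTLCs under protection.
                ratio        = 0.5                       // Portion of the value usable as fees.
                maxBudget    = btcutil.Amount(100_000)   // Hard cap on the CPFP budget.
                heightHint   = uint32(800_000)           // Height the commitment confirmed at.
                deadline     = int32(18)                 // Relative deadline in blocks.
        )

        // The budget is a ratio of the value under protection, capped, plus
        // the anchor output value so the anchor itself can be spent toward
        // fees.
        budget := btcutil.Amount(float64(valueAtStake) * ratio)
        if budget > maxBudget {
                budget = maxBudget
        }
        budget += AnchorOutputValue

        // The sweeper expects an absolute deadline height, so the relative
        // deadline is offset by the height hint.
        deadlineHeight := deadline + int32(heightHint)

        fmt.Printf("budget=%v, deadline height=%d\n", budget, deadlineHeight)
}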
3223

3224
// prepareAnchorSweeps creates a list of requests to be used by the sweeper for
3225
// all possible commitment versions.
3226
func (c *ChannelArbitrator) prepareAnchorSweeps(heightHint uint32,
3227
        anchors *lnwallet.AnchorResolutions) ([]sweepRequest, error) {
19✔
3228

19✔
3229
        // requests holds all the possible anchor sweep requests. We can have
19✔
3230
        // up to 3 different versions of commitments (local/remote/remote
19✔
3231
        // dangling) to be CPFPed by the anchors.
19✔
3232
        requests := make([]sweepRequest, 0, 3)
19✔
3233

19✔
3234
        // remotePendingReq holds the request for sweeping the anchor output on
19✔
3235
        // the remote pending commitment. It's only set when there's an actual
19✔
3236
        // pending remote commitment and it's used to decide whether we need to
19✔
3237
        // update the fee budget when sweeping the anchor output on the local
19✔
3238
        // commitment.
19✔
3239
        remotePendingReq := fn.None[sweepRequest]()
19✔
3240

19✔
3241
        // First we check on the remote pending commitment and optionally
19✔
3242
        // create an anchor sweeping request.
19✔
3243
        htlcs, ok := c.activeHTLCs[RemotePendingHtlcSet]
19✔
3244
        if ok && anchors.RemotePending != nil {
21✔
3245
                req, err := c.createSweepRequest(
2✔
3246
                        anchors.RemotePending, htlcs, "remote pending",
2✔
3247
                        heightHint,
2✔
3248
                )
2✔
3249
                if err != nil {
2✔
3250
                        return nil, err
×
3251
                }
×
3252

3253
                // Save the request.
3254
                requests = append(requests, req)
2✔
3255

2✔
3256
                // Set the optional variable.
2✔
3257
                remotePendingReq = fn.Some(req)
2✔
3258
        }
3259

3260
        // Check the local commitment and optionally create an anchor sweeping
3261
        // request. The params used in this request will be influenced by the
3262
        // anchor sweeping request made from the pending remote commitment.
3263
        htlcs, ok = c.activeHTLCs[LocalHtlcSet]
19✔
3264
        if ok && anchors.Local != nil {
22✔
3265
                req, err := c.createSweepRequest(
3✔
3266
                        anchors.Local, htlcs, "local", heightHint,
3✔
3267
                )
3✔
3268
                if err != nil {
3✔
3269
                        return nil, err
×
3270
                }
×
3271

3272
                // If there's an anchor sweeping request from the pending
3273
                // remote commitment, we will compare its budget against the
3274
                // budget used here and choose the params with the larger
3275
                // budget. The deadline when choosing the remote pending budget
3276
                // instead of the local one will always be earlier or equal to
3277
                // the local deadline because outgoing HTLCs are resolved on
3278
                // the local commitment first before they are removed from the
3279
                // remote one.
3280
                remotePendingReq.WhenSome(func(s sweepRequest) {
5✔
3281
                        if s.params.Budget <= req.params.Budget {
3✔
3282
                                return
1✔
3283
                        }
1✔
3284

3285
                        log.Infof("ChannelArbitrator(%v): replaced local "+
1✔
3286
                                "anchor(%v) sweep params with pending remote "+
1✔
3287
                                "anchor sweep params, \nold:[%v], \nnew:[%v]",
1✔
3288
                                c.cfg.ChanPoint, anchors.Local.CommitAnchor,
1✔
3289
                                req.params, s.params)
1✔
3290

1✔
3291
                        req.params = s.params
1✔
3292
                })
3293

3294
                // Save the request.
3295
                requests = append(requests, req)
3✔
3296
        }
3297

3298
        // Check the remote commitment and create an anchor sweeping request if
3299
        // needed.
3300
        htlcs, ok = c.activeHTLCs[RemoteHtlcSet]
19✔
3301
        if ok && anchors.Remote != nil {
22✔
3302
                req, err := c.createSweepRequest(
3✔
3303
                        anchors.Remote, htlcs, "remote", heightHint,
3✔
3304
                )
3✔
3305
                if err != nil {
3✔
3306
                        return nil, err
×
3307
                }
×
3308

3309
                requests = append(requests, req)
3✔
3310
        }
3311

3312
        return requests, nil
19✔
3313
}
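
// The local-commitment branch above may replace its sweep params with the
// pending-remote ones when the latter carry a larger budget. The helper
// below is a minimal, self-contained sketch of that "override only when the
// optional value is better" pattern, using the fn.Option API already seen in
// this file. The name pickLargerBudget is hypothetical and not part of lnd;
// it exists purely for illustration.
func pickLargerBudget(base btcutil.Amount,
        override fn.Option[btcutil.Amount]) btcutil.Amount {

        // Only look at the override if it was actually set.
        override.WhenSome(func(o btcutil.Amount) {
                // Keep the base budget unless the override is strictly
                // larger, mirroring the comparison on params.Budget above.
                if o > base {
                        base = o
                }
        })

        return base
}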

// failIncomingDust resolves the incoming dust HTLCs because they do not have
// an output on the commitment transaction and cannot be resolved onchain. We
// mark them as failed here.
func (c *ChannelArbitrator) failIncomingDust(
        incomingDustHTLCs []channeldb.HTLC) error {

        for _, htlc := range incomingDustHTLCs {
                if !htlc.Incoming || htlc.OutputIndex >= 0 {
                        return fmt.Errorf("htlc with index %v is not incoming "+
                                "dust", htlc.OutputIndex)
                }

                key := models.CircuitKey{
                        ChanID: c.cfg.ShortChanID,
                        HtlcID: htlc.HtlcIndex,
                }

                // Mark this dust htlc as final failed.
                chainArbCfg := c.cfg.ChainArbitratorConfig
                err := chainArbCfg.PutFinalHtlcOutcome(
                        key.ChanID, key.HtlcID, false,
                )
                if err != nil {
                        return err
                }

                // Send notification.
                chainArbCfg.HtlcNotifier.NotifyFinalHtlcEvent(
                        key,
                        channeldb.FinalHtlcInfo{
                                Settled:  false,
                                Offchain: false,
                        },
                )
        }

        return nil
}
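
// failIncomingDust only accepts HTLCs that are incoming and trimmed from the
// commitment transaction, i.e. they have a negative output index. The helper
// below restates that predicate in isolation as a sketch; isIncomingDust is
// hypothetical and not part of lnd, it simply mirrors the validation at the
// top of the loop above.
func isIncomingDust(htlc channeldb.HTLC) bool {
        // Dust HTLCs carry no output on the commitment transaction, so a
        // non-negative output index means the HTLC has a real output and is
        // handled by the regular resolvers instead.
        return htlc.Incoming && htlc.OutputIndex < 0
}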

// abandonForwards cancels back the incoming HTLCs for their corresponding
// outgoing HTLCs. We use a set here to avoid sending duplicate failure
// messages for the same HTLC. This also needs to be done for locally
// initiated outgoing HTLCs because they are special cased in the switch.
func (c *ChannelArbitrator) abandonForwards(htlcs fn.Set[uint64]) error {
        log.Debugf("ChannelArbitrator(%v): cancelling back %v incoming "+
                "HTLC(s)", c.cfg.ChanPoint,
                len(htlcs))

        msgsToSend := make([]ResolutionMsg, 0, len(htlcs))
        failureMsg := &lnwire.FailPermanentChannelFailure{}

        for idx := range htlcs {
                failMsg := ResolutionMsg{
                        SourceChan: c.cfg.ShortChanID,
                        HtlcIndex:  idx,
                        Failure:    failureMsg,
                }

                msgsToSend = append(msgsToSend, failMsg)
        }

        // Send the messages to the switch, if there are any.
        if len(msgsToSend) == 0 {
                return nil
        }

        log.Debugf("ChannelArbitrator(%v): sending resolution message=%v",
                c.cfg.ChanPoint, lnutils.SpewLogClosure(msgsToSend))

        err := c.cfg.DeliverResolutionMsg(msgsToSend...)
        if err != nil {
                log.Errorf("Unable to send resolution messages to switch: %v",
                        err)
                return err
        }

        return nil
}
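
// abandonForwards fails every HTLC index in the set with a single permanent
// channel failure before handing the batch to the switch. The method below
// is an illustrative sketch of just the message-construction step, reusing
// only types referenced above; buildFailResolutions is hypothetical and not
// part of lnd.
func (c *ChannelArbitrator) buildFailResolutions(
        htlcs fn.Set[uint64]) []ResolutionMsg {

        // All abandoned forwards share the same failure reason.
        failureMsg := &lnwire.FailPermanentChannelFailure{}

        msgs := make([]ResolutionMsg, 0, len(htlcs))
        for idx := range htlcs {
                // Each entry cancels back the incoming HTLC that corresponds
                // to the outgoing HTLC with this index on our channel.
                msgs = append(msgs, ResolutionMsg{
                        SourceChan: c.cfg.ShortChanID,
                        HtlcIndex:  idx,
                        Failure:    failureMsg,
                })
        }

        return msgs
}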