lightningnetwork / lnd, job 12199391122 (push, github, web-flow)

06 Dec 2024 01:10PM UTC coverage: 49.807% (-9.1%) from 58.933%

Merge pull request #9337 from Guayaba221/patch-1
chore: fix typo in ruby.md

100137 of 201051 relevant lines covered (49.81%)
2.07 hits per line

Source File: /contractcourt/channel_arbitrator.go (coverage: 77.52%)
1
package contractcourt
2

3
import (
4
        "bytes"
5
        "context"
6
        "errors"
7
        "fmt"
8
        "math"
9
        "sync"
10
        "sync/atomic"
11
        "time"
12

13
        "github.com/btcsuite/btcd/btcutil"
14
        "github.com/btcsuite/btcd/chaincfg/chainhash"
15
        "github.com/btcsuite/btcd/txscript"
16
        "github.com/btcsuite/btcd/wire"
17
        "github.com/lightningnetwork/lnd/channeldb"
18
        "github.com/lightningnetwork/lnd/fn"
19
        "github.com/lightningnetwork/lnd/graph/db/models"
20
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
21
        "github.com/lightningnetwork/lnd/input"
22
        "github.com/lightningnetwork/lnd/invoices"
23
        "github.com/lightningnetwork/lnd/kvdb"
24
        "github.com/lightningnetwork/lnd/labels"
25
        "github.com/lightningnetwork/lnd/lntypes"
26
        "github.com/lightningnetwork/lnd/lnutils"
27
        "github.com/lightningnetwork/lnd/lnwallet"
28
        "github.com/lightningnetwork/lnd/lnwire"
29
        "github.com/lightningnetwork/lnd/sweep"
30
)
31

32
var (
33
        // errAlreadyForceClosed is an error returned when we attempt to force
34
        // close a channel that's already in the process of doing so.
35
        errAlreadyForceClosed = errors.New("channel is already in the " +
36
                "process of being force closed")
37
)
38

39
const (
40
        // arbitratorBlockBufferSize is the size of the buffer we give to each
41
        // channel arbitrator.
42
        arbitratorBlockBufferSize = 20
43

44
        // AnchorOutputValue is the output value for the anchor output of an
45
        // anchor channel.
46
        // See BOLT 03 for more details:
47
        // https://github.com/lightning/bolts/blob/master/03-transactions.md
48
        AnchorOutputValue = btcutil.Amount(330)
49
)
50

51
// WitnessSubscription represents an intent to be notified once new witnesses
52
// are discovered by various active contract resolvers. A contract resolver may
53
// use this to be notified of when it can satisfy an incoming contract after we
54
// discover the witness for an outgoing contract.
55
type WitnessSubscription struct {
56
        // WitnessUpdates is a channel that newly discovered witnesses will be
57
        // sent over.
58
        //
59
        // TODO(roasbeef): couple with WitnessType?
60
        WitnessUpdates <-chan lntypes.Preimage
61

62
        // CancelSubscription is a function closure that should be used by a
63
        // client to cancel the subscription once they are no longer interested
64
        // in receiving new updates.
65
        CancelSubscription func()
66
}
67

68
// WitnessBeacon is a global beacon of witnesses. Contract resolvers will use
69
// this interface to look up witnesses (preimages typically) of contracts
70
// they're trying to resolve, add new preimages they resolve, and finally
71
// receive new updates each time a new preimage is discovered.
72
//
73
// TODO(roasbeef): need to delete the pre-images once we've used them
74
// and have been sufficiently confirmed?
75
type WitnessBeacon interface {
76
        // SubscribeUpdates returns a channel that will be sent upon *each* time
77
        // a new preimage is discovered.
78
        SubscribeUpdates(chanID lnwire.ShortChannelID, htlc *channeldb.HTLC,
79
                payload *hop.Payload,
80
                nextHopOnionBlob []byte) (*WitnessSubscription, error)
81

82
        // LookupPreimage attempts to look up a preimage in the global cache.
83
        // True is returned for the second argument if the preimage is found.
84
        LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool)
85

86
        // AddPreimages adds a batch of newly discovered preimages to the global
87
        // cache, and also signals any subscribers of the newly discovered
88
        // witness.
89
        AddPreimages(preimages ...lntypes.Preimage) error
90
}
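
// Illustrative sketch (not part of lnd): how a resolver might consume this
// interface. The beacon, scid, htlc, payload and onionBlob values are
// hypothetical placeholders for data the resolver already holds:
//
//        sub, err := beacon.SubscribeUpdates(scid, htlc, payload, onionBlob)
//        if err != nil {
//                return err
//        }
//        defer sub.CancelSubscription()
//        for preimage := range sub.WitnessUpdates {
//                // Each send signals a newly discovered preimage that may
//                // allow an incoming contract to be settled.
//                _ = preimage
//        }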
91

92
// ArbChannel is an abstraction that allows the channel arbitrator to interact
93
// with an open channel.
94
type ArbChannel interface {
95
        // ForceCloseChan should force close the contract that this attendant
96
        // is watching over. We'll use this when we decide that we need to go
97
        // to chain. It should in addition tell the switch to remove the
98
        // corresponding link, such that we won't accept any new updates. The
99
        // returned transaction can then be used to eventually resolve all
100
        // outputs on chain.
101
        ForceCloseChan() (*wire.MsgTx, error)
102

103
        // NewAnchorResolutions returns the anchor resolutions for currently
104
        // valid commitment transactions.
105
        NewAnchorResolutions() (*lnwallet.AnchorResolutions, error)
106
}
107

108
// ChannelArbitratorConfig contains all the functionality that the
109
// ChannelArbitrator needs in order to properly arbitrate any contract dispute
110
// on chain.
111
type ChannelArbitratorConfig struct {
112
        // ChanPoint is the channel point that uniquely identifies this
113
        // channel.
114
        ChanPoint wire.OutPoint
115

116
        // Channel is the full channel data structure. For legacy channels, this
117
        // field may not always be set after a restart.
118
        Channel ArbChannel
119

120
        // ShortChanID describes the exact location of the channel within the
121
        // chain. We'll use this to address any messages that we need to send
122
        // to the switch during contract resolution.
123
        ShortChanID lnwire.ShortChannelID
124

125
        // ChainEvents is an active subscription to the chain watcher for this
126
        // channel to be notified of any on-chain activity related to this
127
        // channel.
128
        ChainEvents *ChainEventSubscription
129

130
        // MarkCommitmentBroadcasted should mark the channel's commitment as
131
        // broadcast, indicating that we are now waiting for it to confirm.
132
        MarkCommitmentBroadcasted func(*wire.MsgTx, lntypes.ChannelParty) error
133

134
        // MarkChannelClosed marks the channel closed in the database, with the
135
        // passed close summary. After this method successfully returns we can
136
        // no longer expect to receive chain events for this channel, and must
137
        // be able to recover from a failure without getting the close event
138
        // again. It takes an optional channel status which will update the
139
        // channel status in the record that we keep of historical channels.
140
        MarkChannelClosed func(*channeldb.ChannelCloseSummary,
141
                ...channeldb.ChannelStatus) error
142

143
        // IsPendingClose is a boolean indicating whether the channel is marked
144
        // as pending close in the database.
145
        IsPendingClose bool
146

147
        // ClosingHeight is the height at which the channel was closed. Note
148
        // that this value is only valid if IsPendingClose is true.
149
        ClosingHeight uint32
150

151
        // CloseType is the type of the close event in case IsPendingClose is
152
        // true. Otherwise this value is unset.
153
        CloseType channeldb.ClosureType
154

155
        // MarkChannelResolved is a function closure that serves to mark a
156
        // channel as "fully resolved". A channel itself can be considered
157
        // fully resolved once all active contracts have individually been
158
        // fully resolved.
159
        //
160
        // TODO(roasbeef): need RPC's to combine for pendingchannels RPC
161
        MarkChannelResolved func() error
162

163
        // PutResolverReport records a resolver report for the channel. If the
164
        // transaction provided is nil, the function should write the report
165
        // in a new transaction.
166
        PutResolverReport func(tx kvdb.RwTx,
167
                report *channeldb.ResolverReport) error
168

169
        // FetchHistoricalChannel retrieves the historical state of a channel.
170
        // This is mostly used to supplement the ContractResolvers with
171
        // additional information required for proper contract resolution.
172
        FetchHistoricalChannel func() (*channeldb.OpenChannel, error)
173

174
        // FindOutgoingHTLCDeadline returns the deadline in absolute block
175
        // height for the specified outgoing HTLC. For an outgoing HTLC, its
176
        // deadline is defined by the timeout height of its corresponding
177
        // incoming HTLC - this is the expiry height that the remote peer can
178
        // spend his/her outgoing HTLC via the timeout path.
179
        FindOutgoingHTLCDeadline func(htlc channeldb.HTLC) fn.Option[int32]
180

181
        ChainArbitratorConfig
182
}
183

184
// ReportOutputType describes the type of output that is being reported
185
// on.
186
type ReportOutputType uint8
187

188
const (
189
        // ReportOutputIncomingHtlc is an incoming hash time locked contract on
190
        // the commitment tx.
191
        ReportOutputIncomingHtlc ReportOutputType = iota
192

193
        // ReportOutputOutgoingHtlc is an outgoing hash time locked contract on
194
        // the commitment tx.
195
        ReportOutputOutgoingHtlc
196

197
        // ReportOutputUnencumbered is an uncontested output on the commitment
198
        // transaction paying to us directly.
199
        ReportOutputUnencumbered
200

201
        // ReportOutputAnchor is an anchor output on the commitment tx.
202
        ReportOutputAnchor
203
)
204

205
// ContractReport provides a summary of a commitment tx output.
206
type ContractReport struct {
207
        // Outpoint is the final output that will be swept back to the wallet.
208
        Outpoint wire.OutPoint
209

210
        // Type indicates the type of the reported output.
211
        Type ReportOutputType
212

213
        // Amount is the final value that will be swept back to the wallet.
214
        Amount btcutil.Amount
215

216
        // MaturityHeight is the absolute block height that this output will
217
        // mature at.
218
        MaturityHeight uint32
219

220
        // Stage indicates whether the htlc is in the CLTV-timeout stage (1) or
221
        // the CSV-delay stage (2). A stage 1 htlc's maturity height will be set
222
        // to its expiry height, while a stage 2 htlc's maturity height will be
223
        // set to its confirmation height plus the maturity requirement.
224
        Stage uint32
225

226
        // LimboBalance is the total number of frozen coins within this
227
        // contract.
228
        LimboBalance btcutil.Amount
229

230
        // RecoveredBalance is the total value that has been successfully swept
231
        // back to the user's wallet.
232
        RecoveredBalance btcutil.Amount
233
}
234

235
// resolverReport creates a resolver report using some of the information in the
236
// contract report.
237
func (c *ContractReport) resolverReport(spendTx *chainhash.Hash,
238
        resolverType channeldb.ResolverType,
239
        outcome channeldb.ResolverOutcome) *channeldb.ResolverReport {
4✔
240

4✔
241
        return &channeldb.ResolverReport{
4✔
242
                OutPoint:        c.Outpoint,
4✔
243
                Amount:          c.Amount,
4✔
244
                ResolverType:    resolverType,
4✔
245
                ResolverOutcome: outcome,
4✔
246
                SpendTxID:       spendTx,
4✔
247
        }
4✔
248
}
4✔
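
// Illustrative sketch (not part of lnd): how a resolver might turn a populated
// ContractReport into a persisted report once its sweep confirms. The report,
// sweepTxHash and cfg values are hypothetical, and the channeldb constants are
// assumed to be the usual resolver type/outcome values:
//
//        rep := report.resolverReport(
//                &sweepTxHash, channeldb.ResolverTypeIncomingHtlc,
//                channeldb.ResolverOutcomeClaimed,
//        )
//        // Passing a nil tx asks PutResolverReport to open its own.
//        err := cfg.PutResolverReport(nil, rep)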
249

250
// htlcSet represents the set of active HTLCs on a given commitment
251
// transaction.
252
type htlcSet struct {
253
        // incomingHTLCs is a map of all incoming HTLCs on the target
254
        // commitment transaction. We may potentially go onchain to claim the
255
        // funds sent to us within this set.
256
        incomingHTLCs map[uint64]channeldb.HTLC
257

258
        // outgoingHTLCs is a map of all outgoing HTLCs on the target
259
        // commitment transaction. We may potentially go onchain to reclaim the
260
        // funds that are currently in limbo.
261
        outgoingHTLCs map[uint64]channeldb.HTLC
262
}
263

264
// newHtlcSet constructs a new HTLC set from a slice of HTLC's.
265
func newHtlcSet(htlcs []channeldb.HTLC) htlcSet {
4✔
266
        outHTLCs := make(map[uint64]channeldb.HTLC)
4✔
267
        inHTLCs := make(map[uint64]channeldb.HTLC)
4✔
268
        for _, htlc := range htlcs {
8✔
269
                if htlc.Incoming {
8✔
270
                        inHTLCs[htlc.HtlcIndex] = htlc
4✔
271
                        continue
4✔
272
                }
273

274
                outHTLCs[htlc.HtlcIndex] = htlc
4✔
275
        }
276

277
        return htlcSet{
4✔
278
                incomingHTLCs: inHTLCs,
4✔
279
                outgoingHTLCs: outHTLCs,
4✔
280
        }
4✔
281
}
282

283
// HtlcSetKey is a two-tuple that uniquely identifies a set of HTLCs on a
284
// commitment transaction.
285
type HtlcSetKey struct {
286
        // IsRemote denotes if the HTLCs are on the remote commitment
287
        // transaction.
288
        IsRemote bool
289

290
        // IsPending denotes if the commitment transaction that the HTLCs are
291
        // on is pending (the higher of two unrevoked commitments).
292
        IsPending bool
293
}
294

295
var (
296
        // LocalHtlcSet is the HtlcSetKey used for local commitments.
297
        LocalHtlcSet = HtlcSetKey{IsRemote: false, IsPending: false}
298

299
        // RemoteHtlcSet is the HtlcSetKey used for remote commitments.
300
        RemoteHtlcSet = HtlcSetKey{IsRemote: true, IsPending: false}
301

302
        // RemotePendingHtlcSet is the HtlcSetKey used for dangling remote
303
        // commitment transactions.
304
        RemotePendingHtlcSet = HtlcSetKey{IsRemote: true, IsPending: true}
305
)
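
// Illustrative sketch (not part of lnd): assembling the HtlcSetKey-keyed map
// that NewChannelArbitrator expects, assuming localHTLCs and remoteHTLCs are
// the HTLC slices taken from our and the remote party's commitments:
//
//        htlcSets := map[HtlcSetKey]htlcSet{
//                LocalHtlcSet:  newHtlcSet(localHTLCs),
//                RemoteHtlcSet: newHtlcSet(remoteHTLCs),
//        }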
306

307
// String returns a human readable string describing the target HtlcSetKey.
308
func (h HtlcSetKey) String() string {
4✔
309
        switch h {
4✔
310
        case LocalHtlcSet:
4✔
311
                return "LocalHtlcSet"
4✔
312
        case RemoteHtlcSet:
4✔
313
                return "RemoteHtlcSet"
4✔
314
        case RemotePendingHtlcSet:
×
315
                return "RemotePendingHtlcSet"
×
316
        default:
×
317
                return "unknown HtlcSetKey"
×
318
        }
319
}
320

321
// ChannelArbitrator is the on-chain arbitrator for a particular channel. The
322
// struct will stay in sync with the current set of HTLCs on the commitment
323
// transaction. The job of the attendant is to go on-chain to either settle or
324
// cancel an HTLC as necessary iff: an HTLC times out, or we know the
325
// pre-image to an HTLC, but it wasn't settled by the link off-chain. The
326
// ChannelArbitrator will factor in an expected confirmation delta when
327
// broadcasting to ensure that we avoid any possibility of race conditions, and
328
// sweep the output(s) without contest.
329
type ChannelArbitrator struct {
330
        started int32 // To be used atomically.
331
        stopped int32 // To be used atomically.
332

333
        // startTimestamp is the time when this ChannelArbitrator was started.
334
        startTimestamp time.Time
335

336
        // log is a persistent log that the attendant will use to checkpoint
337
        // its next action, and the state of any unresolved contracts.
338
        log ArbitratorLog
339

340
        // activeHTLCs is the set of active incoming/outgoing HTLC's on all
341
        // currently valid commitment transactions.
342
        activeHTLCs map[HtlcSetKey]htlcSet
343

344
        // unmergedSet is used to update the activeHTLCs map in two callsites:
345
        // checkLocalChainActions and sweepAnchors. It contains the latest
346
        // updates from the link. It is not deleted from; its entries may be
347
        // replaced on subsequent calls to notifyContractUpdate.
348
        unmergedSet map[HtlcSetKey]htlcSet
349
        unmergedMtx sync.RWMutex
350

351
        // cfg contains all the functionality that the ChannelArbitrator requires
352
        // to do its duty.
353
        cfg ChannelArbitratorConfig
354

355
        // blocks is a channel that the arbitrator will receive new blocks on.
356
        // This channel should be buffered so that it does not block the
357
        // sender.
358
        blocks chan int32
359

360
        // signalUpdates is a channel that any new live signals for the channel
361
        // we're watching over will be sent on.
362
        signalUpdates chan *signalUpdateMsg
363

364
        // activeResolvers is a slice of any active resolvers. This is used to
365
        // be able to signal them for shutdown in the case that we shut down.
366
        activeResolvers []ContractResolver
367

368
        // activeResolversLock prevents simultaneous read and write to the
369
        // resolvers slice.
370
        activeResolversLock sync.RWMutex
371

372
        // resolutionSignal is a channel that will be sent upon by contract
373
        // resolvers once their contract has been fully resolved. With each
374
        // send, we'll check to see if the contract is fully resolved.
375
        resolutionSignal chan struct{}
376

377
        // forceCloseReqs is a channel that requests to forcibly close the
378
        // contract will be sent over.
379
        forceCloseReqs chan *forceCloseReq
380

381
        // state is the current state of the arbitrator. This state is examined
382
        // upon start up to decide which actions to take.
383
        state ArbitratorState
384

385
        wg   sync.WaitGroup
386
        quit chan struct{}
387
}
388

389
// NewChannelArbitrator returns a new instance of a ChannelArbitrator backed by
390
// the passed config struct.
391
func NewChannelArbitrator(cfg ChannelArbitratorConfig,
392
        htlcSets map[HtlcSetKey]htlcSet, log ArbitratorLog) *ChannelArbitrator {
4✔
393

4✔
394
        // Create a new map for unmerged HTLC's as we will overwrite the values
4✔
395
        // and want to avoid modifying activeHTLCs directly. This soft copying
4✔
396
        // is done to ensure that activeHTLCs isn't reset as an empty map later
4✔
397
        // on.
4✔
398
        unmerged := make(map[HtlcSetKey]htlcSet)
4✔
399
        unmerged[LocalHtlcSet] = htlcSets[LocalHtlcSet]
4✔
400
        unmerged[RemoteHtlcSet] = htlcSets[RemoteHtlcSet]
4✔
401

4✔
402
        // If the pending set exists, write that as well.
4✔
403
        if _, ok := htlcSets[RemotePendingHtlcSet]; ok {
4✔
404
                unmerged[RemotePendingHtlcSet] = htlcSets[RemotePendingHtlcSet]
×
405
        }
×
406

407
        return &ChannelArbitrator{
4✔
408
                log:              log,
4✔
409
                blocks:           make(chan int32, arbitratorBlockBufferSize),
4✔
410
                signalUpdates:    make(chan *signalUpdateMsg),
4✔
411
                resolutionSignal: make(chan struct{}),
4✔
412
                forceCloseReqs:   make(chan *forceCloseReq),
4✔
413
                activeHTLCs:      htlcSets,
4✔
414
                unmergedSet:      unmerged,
4✔
415
                cfg:              cfg,
4✔
416
                quit:             make(chan struct{}),
4✔
417
        }
4✔
418
}
419

420
// chanArbStartState contains the information from disk that we need to start
421
// up a channel arbitrator.
422
type chanArbStartState struct {
423
        currentState ArbitratorState
424
        commitSet    *CommitSet
425
}
426

427
// getStartState retrieves the information from disk that our channel arbitrator
428
// requires to start.
429
func (c *ChannelArbitrator) getStartState(tx kvdb.RTx) (*chanArbStartState,
430
        error) {
4✔
431

4✔
432
        // First, we'll read our last state from disk, so our internal state
4✔
433
        // machine can act accordingly.
4✔
434
        state, err := c.log.CurrentState(tx)
4✔
435
        if err != nil {
4✔
436
                return nil, err
×
437
        }
×
438

439
        // Next we'll fetch our confirmed commitment set. This will only exist
440
        // if the channel has been closed out on chain for modern nodes. For
441
        // older nodes, this won't be found at all, and we will rely on the
442
        // existing written chain actions. Additionally, if this channel hasn't
443
        // logged any actions in the log, then this field won't be present.
444
        commitSet, err := c.log.FetchConfirmedCommitSet(tx)
4✔
445
        if err != nil && err != errNoCommitSet && err != errScopeBucketNoExist {
4✔
446
                return nil, err
×
447
        }
×
448

449
        return &chanArbStartState{
4✔
450
                currentState: state,
4✔
451
                commitSet:    commitSet,
4✔
452
        }, nil
4✔
453
}
454

455
// Start starts all the goroutines that the ChannelArbitrator needs to operate.
456
// It takes a start state, which will be looked up on disk if it is not
457
// provided.
458
func (c *ChannelArbitrator) Start(state *chanArbStartState) error {
4✔
459
        if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
4✔
460
                return nil
×
461
        }
×
462
        c.startTimestamp = c.cfg.Clock.Now()
4✔
463

4✔
464
        // If the state passed in is nil, we look it up now.
4✔
465
        if state == nil {
8✔
466
                var err error
4✔
467
                state, err = c.getStartState(nil)
4✔
468
                if err != nil {
4✔
469
                        return err
×
470
                }
×
471
        }
472

473
        log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
4✔
474
                c.cfg.ChanPoint, lnutils.SpewLogClosure(c.activeHTLCs),
4✔
475
                state.currentState)
4✔
476

4✔
477
        // Set our state from our starting state.
4✔
478
        c.state = state.currentState
4✔
479

4✔
480
        _, bestHeight, err := c.cfg.ChainIO.GetBestBlock()
4✔
481
        if err != nil {
4✔
482
                return err
×
483
        }
×
484

485
        c.wg.Add(1)
4✔
486
        go c.channelAttendant(bestHeight, state.commitSet)
4✔
487

4✔
488
        return nil
4✔
489
}
490

491
// progressStateMachineAfterRestart attempts to progress the state machine
492
// after a restart. This makes sure that if the state transition failed, we
493
// will try to progress the state machine again. Moreover it will relaunch
494
// resolvers if the channel is still in the pending close state and has not
495
// been fully resolved yet.
496
func (c *ChannelArbitrator) progressStateMachineAfterRestart(bestHeight int32,
497
        commitSet *CommitSet) error {
4✔
498

4✔
499
        // If the channel has been marked pending close in the database, and we
4✔
500
        // haven't transitioned the state machine to StateContractClosed (or a
4✔
501
        // succeeding state), then a state transition most likely failed. We'll
4✔
502
        // try to recover from this by manually advancing the state by setting
4✔
503
        // the corresponding close trigger.
4✔
504
        trigger := chainTrigger
4✔
505
        triggerHeight := uint32(bestHeight)
4✔
506
        if c.cfg.IsPendingClose {
8✔
507
                switch c.state {
4✔
508
                case StateDefault:
×
509
                        fallthrough
×
510
                case StateBroadcastCommit:
×
511
                        fallthrough
×
512
                case StateCommitmentBroadcasted:
×
513
                        switch c.cfg.CloseType {
×
514

515
                        case channeldb.CooperativeClose:
×
516
                                trigger = coopCloseTrigger
×
517

518
                        case channeldb.BreachClose:
×
519
                                trigger = breachCloseTrigger
×
520

521
                        case channeldb.LocalForceClose:
×
522
                                trigger = localCloseTrigger
×
523

524
                        case channeldb.RemoteForceClose:
×
525
                                trigger = remoteCloseTrigger
×
526
                        }
527

528
                        log.Warnf("ChannelArbitrator(%v): detected stalled "+
×
529
                                "state=%v for closed channel",
×
530
                                c.cfg.ChanPoint, c.state)
×
531
                }
532

533
                triggerHeight = c.cfg.ClosingHeight
4✔
534
        }
535

536
        log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+
4✔
537
                "triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger,
4✔
538
                triggerHeight)
4✔
539

4✔
540
        // We'll now attempt to advance our state forward based on the current
4✔
541
        // on-chain state, and our set of active contracts.
4✔
542
        startingState := c.state
4✔
543
        nextState, _, err := c.advanceState(
4✔
544
                triggerHeight, trigger, commitSet,
4✔
545
        )
4✔
546
        if err != nil {
4✔
547
                switch err {
×
548

549
                // If we detect that we tried to fetch resolutions, but failed,
550
                // this channel was marked closed in the database before
551
                // resolutions were successfully written. In this case there is not
552
                // much we can do, so we don't return the error.
553
                case errScopeBucketNoExist:
×
554
                        fallthrough
×
555
                case errNoResolutions:
×
556
                        log.Warnf("ChannelArbitrator(%v): detected closed "+
×
557
                                "channel with no contract resolutions written.",
×
558
                                c.cfg.ChanPoint)
×
559

560
                default:
×
561
                        return err
×
562
                }
563
        }
564

565
        // If we started and ended at the awaiting full resolution state, then
566
        // we'll relaunch our set of unresolved contracts.
567
        if startingState == StateWaitingFullResolution &&
4✔
568
                nextState == StateWaitingFullResolution {
8✔
569

4✔
570
                // In order to relaunch the resolvers, we'll need to fetch the
4✔
571
                // set of HTLCs that were present in the commitment transaction
4✔
572
                // at the time it was confirmed. commitSet.ConfCommitKey can't
4✔
573
                // be nil at this point since we're in
4✔
574
                // StateWaitingFullResolution. We can only be in
4✔
575
                // StateWaitingFullResolution after we've transitioned from
4✔
576
                // StateContractClosed which can only be triggered by the local
4✔
577
                // or remote close trigger. This trigger is only fired when we
4✔
578
                // receive a chain event from the chain watcher that the
4✔
579
                // commitment has been confirmed on chain, and before we
4✔
580
                // advance our state step, we call InsertConfirmedCommitSet.
4✔
581
                err := c.relaunchResolvers(commitSet, triggerHeight)
4✔
582
                if err != nil {
4✔
583
                        return err
×
584
                }
×
585
        }
586

587
        return nil
4✔
588
}
589

590
// maybeAugmentTaprootResolvers will update the contract resolution information
591
// for taproot channels. This ensures that all the resolvers have the latest
592
// resolution, which may also include data such as the control block and tap
593
// tweaks.
594
func maybeAugmentTaprootResolvers(chanType channeldb.ChannelType,
595
        resolver ContractResolver,
596
        contractResolutions *ContractResolutions) {
4✔
597

4✔
598
        if !chanType.IsTaproot() {
8✔
599
                return
4✔
600
        }
4✔
601

602
        // The on-disk resolutions contain all the ctrl block
603
        // information, so we'll set that now for the relevant
604
        // resolvers.
605
        switch r := resolver.(type) {
4✔
606
        case *commitSweepResolver:
4✔
607
                if contractResolutions.CommitResolution != nil {
8✔
608
                        //nolint:ll
4✔
609
                        r.commitResolution = *contractResolutions.CommitResolution
4✔
610
                }
4✔
611
        case *htlcOutgoingContestResolver:
4✔
612
                //nolint:ll
4✔
613
                htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
4✔
614
                for _, htlcRes := range htlcResolutions {
8✔
615
                        htlcRes := htlcRes
4✔
616

4✔
617
                        if r.htlcResolution.ClaimOutpoint ==
4✔
618
                                htlcRes.ClaimOutpoint {
8✔
619

4✔
620
                                r.htlcResolution = htlcRes
4✔
621
                        }
4✔
622
                }
623

624
        case *htlcTimeoutResolver:
4✔
625
                //nolint:ll
4✔
626
                htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
4✔
627
                for _, htlcRes := range htlcResolutions {
8✔
628
                        htlcRes := htlcRes
4✔
629

4✔
630
                        if r.htlcResolution.ClaimOutpoint ==
4✔
631
                                htlcRes.ClaimOutpoint {
8✔
632

4✔
633
                                r.htlcResolution = htlcRes
4✔
634
                        }
4✔
635
                }
636

637
        case *htlcIncomingContestResolver:
4✔
638
                //nolint:ll
4✔
639
                htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
4✔
640
                for _, htlcRes := range htlcResolutions {
8✔
641
                        htlcRes := htlcRes
4✔
642

4✔
643
                        if r.htlcResolution.ClaimOutpoint ==
4✔
644
                                htlcRes.ClaimOutpoint {
8✔
645

4✔
646
                                r.htlcResolution = htlcRes
4✔
647
                        }
4✔
648
                }
649
        case *htlcSuccessResolver:
×
650
                //nolint:ll
×
651
                htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
×
652
                for _, htlcRes := range htlcResolutions {
×
653
                        htlcRes := htlcRes
×
654

×
655
                        if r.htlcResolution.ClaimOutpoint ==
×
656
                                htlcRes.ClaimOutpoint {
×
657

×
658
                                r.htlcResolution = htlcRes
×
659
                        }
×
660
                }
661
        }
662
}
663

664
// relaunchResolvers relaunches the set of resolvers for unresolved contracts in
665
// order to provide them with information that's not immediately available upon
666
// starting the ChannelArbitrator. This information should ideally be stored in
667
// the database, so this only serves as an intermediate workaround to prevent a
668
// migration.
669
func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
670
        heightHint uint32) error {
4✔
671

4✔
672
        // We'll now query our log to see if there are any active unresolved
4✔
673
        // contracts. If this is the case, then we'll relaunch all contract
4✔
674
        // resolvers.
4✔
675
        unresolvedContracts, err := c.log.FetchUnresolvedContracts()
4✔
676
        if err != nil {
4✔
677
                return err
×
678
        }
×
679

680
        // Retrieve the commitment tx hash from the log.
681
        contractResolutions, err := c.log.FetchContractResolutions()
4✔
682
        if err != nil {
4✔
683
                log.Errorf("unable to fetch contract resolutions: %v",
×
684
                        err)
×
685
                return err
×
686
        }
×
687
        commitHash := contractResolutions.CommitHash
4✔
688

4✔
689
        // In prior versions of lnd, the information needed to supplement the
4✔
690
        // resolvers (in most cases, the full amount of the HTLC) was found in
4✔
691
        // the chain action map, which is now deprecated.  As a result, if the
4✔
692
        // commitSet is nil (an older node with unresolved HTLCs at time of
4✔
693
        // upgrade), then we'll use the chain action information in place. The
4✔
694
        // chain actions may exclude some information, but we cannot recover it
4✔
695
        // for these older nodes at the moment.
4✔
696
        var confirmedHTLCs []channeldb.HTLC
4✔
697
        if commitSet != nil && commitSet.ConfCommitKey.IsSome() {
8✔
698
                confCommitKey, err := commitSet.ConfCommitKey.UnwrapOrErr(
4✔
699
                        fmt.Errorf("no commitKey available"),
4✔
700
                )
4✔
701
                if err != nil {
4✔
702
                        return err
×
703
                }
×
704
                confirmedHTLCs = commitSet.HtlcSets[confCommitKey]
4✔
705
        } else {
×
706
                chainActions, err := c.log.FetchChainActions()
×
707
                if err != nil {
×
708
                        log.Errorf("unable to fetch chain actions: %v", err)
×
709
                        return err
×
710
                }
×
711
                for _, htlcs := range chainActions {
×
712
                        confirmedHTLCs = append(confirmedHTLCs, htlcs...)
×
713
                }
×
714
        }
715

716
        // Reconstruct the htlc outpoints and data from the chain action log.
717
        // The purpose of the constructed htlc map is to supplement the
718
        // resolvers restored from the database with extra data. Ideally this data
719
        // is stored as part of the resolver in the log. This is a workaround
720
        // to prevent a db migration. We use all available htlc sets here in
721
        // order to ensure we have complete coverage.
722
        htlcMap := make(map[wire.OutPoint]*channeldb.HTLC)
4✔
723
        for _, htlc := range confirmedHTLCs {
8✔
724
                htlc := htlc
4✔
725
                outpoint := wire.OutPoint{
4✔
726
                        Hash:  commitHash,
4✔
727
                        Index: uint32(htlc.OutputIndex),
4✔
728
                }
4✔
729
                htlcMap[outpoint] = &htlc
4✔
730
        }
4✔
731

732
        // We'll also fetch the historical state of this channel, as it should
733
        // have been marked as closed by now, and supplement it to each resolver
734
        // such that we can properly resolve our pending contracts.
735
        var chanState *channeldb.OpenChannel
4✔
736
        chanState, err = c.cfg.FetchHistoricalChannel()
4✔
737
        switch {
4✔
738
        // If we don't find this channel, then it may be the case that it
739
        // was closed before we started to retain the final state
740
        // information for open channels.
741
        case err == channeldb.ErrNoHistoricalBucket:
×
742
                fallthrough
×
743
        case err == channeldb.ErrChannelNotFound:
×
744
                log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
×
745
                        "state", c.cfg.ChanPoint)
×
746

747
        case err != nil:
×
748
                return err
×
749
        }
750

751
        log.Infof("ChannelArbitrator(%v): relaunching %v contract "+
4✔
752
                "resolvers", c.cfg.ChanPoint, len(unresolvedContracts))
4✔
753

4✔
754
        for i := range unresolvedContracts {
8✔
755
                resolver := unresolvedContracts[i]
4✔
756

4✔
757
                if chanState != nil {
8✔
758
                        resolver.SupplementState(chanState)
4✔
759

4✔
760
                        // For taproot channels, we'll need to also make sure
4✔
761
                        // the control block information was set properly.
4✔
762
                        maybeAugmentTaprootResolvers(
4✔
763
                                chanState.ChanType, resolver,
4✔
764
                                contractResolutions,
4✔
765
                        )
4✔
766
                }
4✔
767

768
                unresolvedContracts[i] = resolver
4✔
769

4✔
770
                htlcResolver, ok := resolver.(htlcContractResolver)
4✔
771
                if !ok {
8✔
772
                        continue
4✔
773
                }
774

775
                htlcPoint := htlcResolver.HtlcPoint()
4✔
776
                htlc, ok := htlcMap[htlcPoint]
4✔
777
                if !ok {
4✔
778
                        return fmt.Errorf(
×
779
                                "htlc resolver %T unavailable", resolver,
×
780
                        )
×
781
                }
×
782

783
                htlcResolver.Supplement(*htlc)
4✔
784

4✔
785
                // If this is an outgoing HTLC, we will also need to supplement
4✔
786
                // the resolver with the expiry block height of its
4✔
787
                // corresponding incoming HTLC.
4✔
788
                if !htlc.Incoming {
8✔
789
                        deadline := c.cfg.FindOutgoingHTLCDeadline(*htlc)
4✔
790
                        htlcResolver.SupplementDeadline(deadline)
4✔
791
                }
4✔
792
        }
793

794
        // The anchor resolver is stateless and can always be re-instantiated.
795
        if contractResolutions.AnchorResolution != nil {
8✔
796
                anchorResolver := newAnchorResolver(
4✔
797
                        contractResolutions.AnchorResolution.AnchorSignDescriptor,
4✔
798
                        contractResolutions.AnchorResolution.CommitAnchor,
4✔
799
                        heightHint, c.cfg.ChanPoint,
4✔
800
                        ResolverConfig{
4✔
801
                                ChannelArbitratorConfig: c.cfg,
4✔
802
                        },
4✔
803
                )
4✔
804

4✔
805
                anchorResolver.SupplementState(chanState)
4✔
806

4✔
807
                unresolvedContracts = append(unresolvedContracts, anchorResolver)
4✔
808

4✔
809
                // TODO(roasbeef): this isn't re-launched?
4✔
810
        }
4✔
811

812
        c.launchResolvers(unresolvedContracts, true)
4✔
813

4✔
814
        return nil
4✔
815
}
816

817
// Report returns htlc reports for the active resolvers.
818
func (c *ChannelArbitrator) Report() []*ContractReport {
4✔
819
        c.activeResolversLock.RLock()
4✔
820
        defer c.activeResolversLock.RUnlock()
4✔
821

4✔
822
        var reports []*ContractReport
4✔
823
        for _, resolver := range c.activeResolvers {
8✔
824
                r, ok := resolver.(reportingContractResolver)
4✔
825
                if !ok {
4✔
826
                        continue
×
827
                }
828

829
                report := r.report()
4✔
830
                if report == nil {
8✔
831
                        continue
4✔
832
                }
833

834
                reports = append(reports, report)
4✔
835
        }
836

837
        return reports
4✔
838
}
839

840
// Stop signals the ChannelArbitrator for a graceful shutdown.
841
func (c *ChannelArbitrator) Stop() error {
4✔
842
        if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
4✔
843
                return nil
×
844
        }
×
845

846
        log.Debugf("Stopping ChannelArbitrator(%v)", c.cfg.ChanPoint)
4✔
847

4✔
848
        if c.cfg.ChainEvents.Cancel != nil {
8✔
849
                go c.cfg.ChainEvents.Cancel()
4✔
850
        }
4✔
851

852
        c.activeResolversLock.RLock()
4✔
853
        for _, activeResolver := range c.activeResolvers {
8✔
854
                activeResolver.Stop()
4✔
855
        }
4✔
856
        c.activeResolversLock.RUnlock()
4✔
857

4✔
858
        close(c.quit)
4✔
859
        c.wg.Wait()
4✔
860

4✔
861
        return nil
4✔
862
}
863

864
// transitionTrigger is an enum that denotes exactly *why* a state transition
865
// was initiated. This is useful as depending on the initial trigger, we may
866
// skip certain states as those actions are expected to have already taken
867
// place as a result of the external trigger.
868
type transitionTrigger uint8
869

870
const (
871
        // chainTrigger is a transition trigger that has been attempted due to
872
        // changing on-chain conditions, such as a new block being attached
873
        // which times out HTLCs.
874
        chainTrigger transitionTrigger = iota
875

876
        // userTrigger is a transition trigger driven by user action. Examples
877
        // of such a trigger include a user requesting a force closure of the
878
        // channel.
879
        userTrigger
880

881
        // remoteCloseTrigger is a transition trigger driven by the remote
882
        // peer's commitment being confirmed.
883
        remoteCloseTrigger
884

885
        // localCloseTrigger is a transition trigger driven by our commitment
886
        // being confirmed.
887
        localCloseTrigger
888

889
        // coopCloseTrigger is a transition trigger driven by a cooperative
890
        // close transaction being confirmed.
891
        coopCloseTrigger
892

893
        // breachCloseTrigger is a transition trigger driven by a remote breach
894
        // being confirmed. In this case the channel arbitrator will wait for
895
        // the BreachArbitrator to finish and then clean up gracefully.
896
        breachCloseTrigger
897
)
898

899
// String returns a human readable string describing the passed
900
// transitionTrigger.
901
func (t transitionTrigger) String() string {
4✔
902
        switch t {
4✔
903
        case chainTrigger:
4✔
904
                return "chainTrigger"
4✔
905

906
        case remoteCloseTrigger:
4✔
907
                return "remoteCloseTrigger"
4✔
908

909
        case userTrigger:
4✔
910
                return "userTrigger"
4✔
911

912
        case localCloseTrigger:
4✔
913
                return "localCloseTrigger"
4✔
914

915
        case coopCloseTrigger:
4✔
916
                return "coopCloseTrigger"
4✔
917

918
        case breachCloseTrigger:
4✔
919
                return "breachCloseTrigger"
4✔
920

921
        default:
×
922
                return "unknown trigger"
×
923
        }
924
}
925

926
// stateStep is a helper method that examines our internal state, and attempts
927
// the appropriate state transition if necessary. The next state we transition
928
// to is returned. Additionally, if the next transition results in a commitment
929
// broadcast, the commitment transaction itself is returned.
930
func (c *ChannelArbitrator) stateStep(
931
        triggerHeight uint32, trigger transitionTrigger,
932
        confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
4✔
933

4✔
934
        var (
4✔
935
                nextState ArbitratorState
4✔
936
                closeTx   *wire.MsgTx
4✔
937
        )
4✔
938
        switch c.state {
4✔
939

940
        // If we're in the default state, then we'll check our set of actions
941
        // to see if while we were down, conditions have changed.
942
        case StateDefault:
4✔
943
                log.Debugf("ChannelArbitrator(%v): new block (height=%v) "+
4✔
944
                        "examining active HTLC's", c.cfg.ChanPoint,
4✔
945
                        triggerHeight)
4✔
946

4✔
947
                // As a new block has been connected to the end of the main
4✔
948
                // chain, we'll check to see if we need to make any on-chain
4✔
949
                // claims on behalf of the channel contract that we're
4✔
950
                // arbitrating for. If a commitment has confirmed, then we'll
4✔
951
                // use the set snapshot from the chain, otherwise we'll use our
4✔
952
                // current set.
4✔
953
                var (
4✔
954
                        chainActions ChainActionMap
4✔
955
                        err          error
4✔
956
                )
4✔
957

4✔
958
                // Normally if we force close the channel locally we will have
4✔
959
                // no confCommitSet. However when the remote commitment confirms
4✔
960
                // without us ever broadcasting our local commitment we need to
4✔
961
                // make sure we cancel all upstream HTLCs for outgoing dust
4✔
962
                // HTLCs as well hence we need to fetch the chain actions here
4✔
963
                // as well.
4✔
964
                if confCommitSet == nil {
8✔
965
                        // Update the set of activeHTLCs so
4✔
966
                        // checkLocalChainActions has an up-to-date view of the
4✔
967
                        // commitments.
4✔
968
                        c.updateActiveHTLCs()
4✔
969
                        htlcs := c.activeHTLCs
4✔
970
                        chainActions, err = c.checkLocalChainActions(
4✔
971
                                triggerHeight, trigger, htlcs, false,
4✔
972
                        )
4✔
973
                        if err != nil {
4✔
974
                                return StateDefault, nil, err
×
975
                        }
×
976
                } else {
4✔
977
                        chainActions, err = c.constructChainActions(
4✔
978
                                confCommitSet, triggerHeight, trigger,
4✔
979
                        )
4✔
980
                        if err != nil {
4✔
981
                                return StateDefault, nil, err
×
982
                        }
×
983
                }
984

985
                // If there are no actions to be made, then we'll remain in the
986
                // default state. If this isn't a self initiated event (we're
987
                // checking due to a chain update), then we'll exit now.
988
                if len(chainActions) == 0 && trigger == chainTrigger {
8✔
989
                        log.Debugf("ChannelArbitrator(%v): no actions for "+
4✔
990
                                "chain trigger, terminating", c.cfg.ChanPoint)
4✔
991

4✔
992
                        return StateDefault, closeTx, nil
4✔
993
                }
4✔
994

995
                // Otherwise, we'll log that we checked the HTLC actions as the
996
                // commitment transaction has already been broadcast.
997
                log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v",
4✔
998
                        c.cfg.ChanPoint, lnutils.SpewLogClosure(chainActions))
4✔
999

4✔
1000
                // Cancel upstream HTLCs for all outgoing dust HTLCs available
4✔
1001
                // either on the local or the remote/remote pending commitment
4✔
1002
                // transaction.
4✔
1003
                dustHTLCs := chainActions[HtlcFailDustAction]
4✔
1004
                if len(dustHTLCs) > 0 {
8✔
1005
                        log.Debugf("ChannelArbitrator(%v): canceling %v dust "+
4✔
1006
                                "HTLCs backwards", c.cfg.ChanPoint,
4✔
1007
                                len(dustHTLCs))
4✔
1008

4✔
1009
                        getIdx := func(htlc channeldb.HTLC) uint64 {
8✔
1010
                                return htlc.HtlcIndex
4✔
1011
                        }
4✔
1012
                        dustHTLCSet := fn.NewSet(fn.Map(getIdx, dustHTLCs)...)
4✔
1013
                        err = c.abandonForwards(dustHTLCSet)
4✔
1014
                        if err != nil {
4✔
1015
                                return StateError, closeTx, err
×
1016
                        }
×
1017
                }
1018

1019
                // Depending on the type of trigger, we'll either "tunnel"
1020
                // through to a farther state, or just proceed linearly to the
1021
                // next state.
1022
                switch trigger {
4✔
1023

1024
                // If this is a chain trigger, then we'll go straight to the
1025
                // next state, as we still need to broadcast the commitment
1026
                // transaction.
1027
                case chainTrigger:
4✔
1028
                        fallthrough
4✔
1029
                case userTrigger:
4✔
1030
                        nextState = StateBroadcastCommit
4✔
1031

1032
                // If the trigger is a cooperative close being confirmed, then
1033
                // we can go straight to StateFullyResolved, as there won't be
1034
                // any contracts to resolve.
1035
                case coopCloseTrigger:
4✔
1036
                        nextState = StateFullyResolved
4✔
1037

1038
                // Otherwise, if this state advance was triggered by a
1039
                // commitment being confirmed on chain, then we'll jump
1040
                // straight to the state where the contract has already been
1041
                // closed, and we will inspect the set of unresolved contracts.
1042
                case localCloseTrigger:
4✔
1043
                        log.Errorf("ChannelArbitrator(%v): unexpected local "+
4✔
1044
                                "commitment confirmed while in StateDefault",
4✔
1045
                                c.cfg.ChanPoint)
4✔
1046
                        fallthrough
4✔
1047
                case remoteCloseTrigger:
4✔
1048
                        nextState = StateContractClosed
4✔
1049

1050
                case breachCloseTrigger:
4✔
1051
                        nextContractState, err := c.checkLegacyBreach()
4✔
1052
                        if nextContractState == StateError {
4✔
1053
                                return nextContractState, nil, err
×
1054
                        }
×
1055

1056
                        nextState = nextContractState
4✔
1057
                }
1058

1059
        // If we're in this state, then we've decided to broadcast the
1060
        // commitment transaction. We enter this state either due to an outside
1061
        // sub-system, or because an on-chain action has been triggered.
1062
        case StateBroadcastCommit:
4✔
1063
                // Under normal operation, we can only enter
4✔
1064
                // StateBroadcastCommit via a user or chain trigger. On restart,
4✔
1065
                // this state may be reexecuted after closing the channel, but
4✔
1066
                // failing to commit to StateContractClosed or
4✔
1067
                // StateFullyResolved. In that case, one of the four close
4✔
1068
                // triggers will be presented, signifying that we should skip
4✔
1069
                // rebroadcasting, and go straight to resolving the on-chain
4✔
1070
                // contract or marking the channel resolved.
4✔
1071
                switch trigger {
4✔
1072
                case localCloseTrigger, remoteCloseTrigger:
×
1073
                        log.Infof("ChannelArbitrator(%v): detected %s "+
×
1074
                                "close after closing channel, fast-forwarding "+
×
1075
                                "to %s to resolve contract",
×
1076
                                c.cfg.ChanPoint, trigger, StateContractClosed)
×
1077
                        return StateContractClosed, closeTx, nil
×
1078

1079
                case breachCloseTrigger:
4✔
1080
                        nextContractState, err := c.checkLegacyBreach()
4✔
1081
                        if nextContractState == StateError {
4✔
1082
                                log.Infof("ChannelArbitrator(%v): unable to "+
×
1083
                                        "advance breach close resolution: %v",
×
1084
                                        c.cfg.ChanPoint, nextContractState)
×
1085
                                return StateError, closeTx, err
×
1086
                        }
×
1087

1088
                        log.Infof("ChannelArbitrator(%v): detected %s close "+
4✔
1089
                                "after closing channel, fast-forwarding to %s"+
4✔
1090
                                " to resolve contract", c.cfg.ChanPoint,
4✔
1091
                                trigger, nextContractState)
4✔
1092

4✔
1093
                        return nextContractState, closeTx, nil
4✔
1094

1095
                case coopCloseTrigger:
×
1096
                        log.Infof("ChannelArbitrator(%v): detected %s "+
×
1097
                                "close after closing channel, fast-forwarding "+
×
1098
                                "to %s to resolve contract",
×
1099
                                c.cfg.ChanPoint, trigger, StateFullyResolved)
×
1100
                        return StateFullyResolved, closeTx, nil
×
1101
                }
1102

1103
                log.Infof("ChannelArbitrator(%v): force closing "+
4✔
1104
                        "chan", c.cfg.ChanPoint)
4✔
1105

4✔
1106
                // Now that we have all the actions decided for the set of
4✔
1107
                // HTLC's, we'll broadcast the commitment transaction, and
4✔
1108
                // signal the link to exit.
4✔
1109

4✔
1110
                // We'll tell the switch that it should remove the link for
4✔
1111
                // this channel, in addition to fetching the force close
4✔
1112
                // summary needed to close this channel on chain.
4✔
1113
                forceCloseTx, err := c.cfg.Channel.ForceCloseChan()
4✔
1114
                if err != nil {
4✔
1115
                        log.Errorf("ChannelArbitrator(%v): unable to "+
×
1116
                                "force close: %v", c.cfg.ChanPoint, err)
×
1117

×
1118
                        // We tried to force close (HTLC may be expiring from
×
1119
                        // our PoV, etc), but we think we've lost data. In this
×
1120
                        // case, we'll not force close, but terminate the state
×
1121
                        // machine here to wait to see what confirms on chain.
×
1122
                        if errors.Is(err, lnwallet.ErrForceCloseLocalDataLoss) {
×
1123
                                log.Errorf("ChannelArbitrator(%v): broadcast "+
×
1124
                                        "failed due to local data loss, "+
×
1125
                                        "waiting for on-chain confirmation...",
×
1126
                                        c.cfg.ChanPoint)
×
1127

×
1128
                                return StateBroadcastCommit, nil, nil
×
1129
                        }
×
1130

1131
                        return StateError, closeTx, err
×
1132
                }
1133
                closeTx = forceCloseTx
4✔
1134

4✔
1135
                // Before publishing the transaction, we store it to the
4✔
1136
                // database, such that we can re-publish later in case it
4✔
1137
                // didn't propagate. We initiated the force close, so we
4✔
1138
                // mark broadcast with local initiator set to true.
4✔
1139
                err = c.cfg.MarkCommitmentBroadcasted(closeTx, lntypes.Local)
4✔
1140
                if err != nil {
4✔
1141
                        log.Errorf("ChannelArbitrator(%v): unable to "+
×
1142
                                "mark commitment broadcasted: %v",
×
1143
                                c.cfg.ChanPoint, err)
×
1144
                        return StateError, closeTx, err
×
1145
                }
×
1146

1147
                // With the close transaction in hand, broadcast the
1148
                // transaction to the network, thereby entering the post
1149
                // channel resolution state.
1150
                log.Infof("Broadcasting force close transaction %v, "+
4✔
1151
                        "ChannelPoint(%v): %v", closeTx.TxHash(),
4✔
1152
                        c.cfg.ChanPoint, lnutils.SpewLogClosure(closeTx))
4✔
1153

4✔
1154
                // At this point, we'll now broadcast the commitment
4✔
1155
                // transaction itself.
4✔
1156
                label := labels.MakeLabel(
4✔
1157
                        labels.LabelTypeChannelClose, &c.cfg.ShortChanID,
4✔
1158
                )
4✔
1159
                if err := c.cfg.PublishTx(closeTx, label); err != nil {
8✔
1160
                        log.Errorf("ChannelArbitrator(%v): unable to broadcast "+
4✔
1161
                                "close tx: %v", c.cfg.ChanPoint, err)
4✔
1162

4✔
1163
                        // This makes sure we don't fail at startup if the
4✔
1164
                        // commitment transaction has too low fees to make it
4✔
1165
                        // into mempool. The rebroadcaster makes sure this
4✔
1166
                        // transaction is republished regularly until confirmed
4✔
1167
                        // or replaced.
4✔
1168
                        if !errors.Is(err, lnwallet.ErrDoubleSpend) &&
4✔
1169
                                !errors.Is(err, lnwallet.ErrMempoolFee) {
8✔
1170

4✔
1171
                                return StateError, closeTx, err
4✔
1172
                        }
4✔
1173
                }
1174

1175
                // We go to the StateCommitmentBroadcasted state, where we'll
1176
                // be waiting for the commitment to be confirmed.
1177
                nextState = StateCommitmentBroadcasted
4✔
1178

1179
        // In this state we have broadcasted our own commitment, and will need
1180
        // to wait for a commitment (not necessarily the one we broadcasted!)
1181
        // to be confirmed.
1182
        case StateCommitmentBroadcasted:
4✔
1183
                switch trigger {
4✔
1184

1185
                // We are waiting for a commitment to be confirmed.
1186
                case chainTrigger, userTrigger:
4✔
1187
                        // The commitment transaction has been broadcast, but it
4✔
1188
                        // doesn't necessarily need to be the commitment
4✔
1189
                        // transaction version that is going to be confirmed. To
4✔
1190
                        // be sure that any of those versions can be anchored
4✔
1191
                        // down, we now submit all anchor resolutions to the
4✔
1192
                        // sweeper. The sweeper will keep trying to sweep all of
4✔
1193
                        // them.
4✔
1194
                        //
4✔
1195
                        // Note that the sweeper is idempotent. If we ever
4✔
1196
                        // happen to end up at this point in the code again, no
4✔
1197
                        // harm is done by re-offering the anchors to the
4✔
1198
                        // sweeper.
4✔
1199
                        anchors, err := c.cfg.Channel.NewAnchorResolutions()
4✔
1200
                        if err != nil {
4✔
1201
                                return StateError, closeTx, err
×
1202
                        }
×
1203

1204
                        err = c.sweepAnchors(anchors, triggerHeight)
4✔
1205
                        if err != nil {
4✔
1206
                                return StateError, closeTx, err
×
1207
                        }
×
1208

1209
                        nextState = StateCommitmentBroadcasted
4✔
1210

1211
                // If this state advance was triggered by any of the
1212
                // commitments being confirmed, then we'll jump to the state
1213
                // where the contract has been closed.
1214
                case localCloseTrigger, remoteCloseTrigger:
4✔
1215
                        nextState = StateContractClosed
4✔
1216

1217
                // If a coop close was confirmed, jump straight to the fully
1218
                // resolved state.
1219
                case coopCloseTrigger:
×
1220
                        nextState = StateFullyResolved
×
1221

1222
                case breachCloseTrigger:
×
1223
                        nextContractState, err := c.checkLegacyBreach()
×
1224
                        if nextContractState == StateError {
×
1225
                                return nextContractState, closeTx, err
×
1226
                        }
×
1227

1228
                        nextState = nextContractState
×
1229
                }
1230

1231
                log.Infof("ChannelArbitrator(%v): trigger %v moving from "+
4✔
1232
                        "state %v to %v", c.cfg.ChanPoint, trigger, c.state,
4✔
1233
                        nextState)
4✔
1234

1235
        // If we're in this state, then the contract has been fully closed to
1236
        // outside sub-systems, so we'll process the prior set of on-chain
1237
        // contract actions and launch a set of resolvers.
1238
        case StateContractClosed:
4✔
1239
                // First, we'll fetch our chain actions, and both sets of
4✔
1240
                // resolutions so we can process them.
4✔
1241
                contractResolutions, err := c.log.FetchContractResolutions()
4✔
1242
                if err != nil {
4✔
1243
                        log.Errorf("unable to fetch contract resolutions: %v",
×
1244
                                err)
×
1245
                        return StateError, closeTx, err
×
1246
                }
×
1247

1248
                // If the resolution is empty, and we have no HTLCs at all to
1249
                // act on, then we're done here. We don't need to launch any
1250
                // resolvers, and can go straight to our final state.
1251
                if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() {
8✔
1252
                        log.Infof("ChannelArbitrator(%v): contract "+
4✔
1253
                                "resolutions empty, marking channel as fully resolved!",
4✔
1254
                                c.cfg.ChanPoint)
4✔
1255
                        nextState = StateFullyResolved
4✔
1256
                        break
4✔
1257
                }
1258

1259
                // First, we'll reconstruct a fresh set of chain actions as the
1260
                // set of actions we need to act on may differ based on if it
1261
                // was our commitment, or their commitment that hit the chain.
1262
                htlcActions, err := c.constructChainActions(
4✔
1263
                        confCommitSet, triggerHeight, trigger,
4✔
1264
                )
4✔
1265
                if err != nil {
4✔
1266
                        return StateError, closeTx, err
×
1267
                }
×
1268

1269
                // In case it's a breach transaction, we fail back all upstream
1270
                // HTLCs for their corresponding outgoing HTLCs on the remote
1271
                // commitment set (remote and remote pending set).
1272
                if contractResolutions.BreachResolution != nil {
8✔
1273
                        // cancelBreachedHTLCs is a set which holds HTLCs whose
4✔
1274
                        // corresponding incoming HTLCs will be failed back
4✔
1275
                        // because the peer broadcasted an old state.
4✔
1276
                        cancelBreachedHTLCs := fn.NewSet[uint64]()
4✔
1277

4✔
1278
                        // Using the CommitSet, we'll fail back all
4✔
1279
                        // upstream HTLCs for their corresponding outgoing
4✔
1280
                        // HTLCs that exist on either of the remote commitments.
4✔
1281
                        // The set is used to deduplicate any shared HTLCs.
4✔
1282
                        for htlcSetKey, htlcs := range confCommitSet.HtlcSets {
8✔
1283
                                if !htlcSetKey.IsRemote {
8✔
1284
                                        continue
4✔
1285
                                }
1286

1287
                                for _, htlc := range htlcs {
8✔
1288
                                        // Only outgoing HTLCs have a
4✔
1289
                                        // corresponding incoming HTLC.
4✔
1290
                                        if htlc.Incoming {
8✔
1291
                                                continue
4✔
1292
                                        }
1293

1294
                                        cancelBreachedHTLCs.Add(htlc.HtlcIndex)
4✔
1295
                                }
1296
                        }
1297

1298
                        err := c.abandonForwards(cancelBreachedHTLCs)
4✔
1299
                        if err != nil {
4✔
1300
                                return StateError, closeTx, err
×
1301
                        }
×
1302
                } else {
4✔
1303
                        // If it's not a breach, we resolve all incoming dust
4✔
1304
                        // HTLCs immediately after the commitment is confirmed.
4✔
1305
                        err = c.failIncomingDust(
4✔
1306
                                htlcActions[HtlcIncomingDustFinalAction],
4✔
1307
                        )
4✔
1308
                        if err != nil {
4✔
1309
                                return StateError, closeTx, err
×
1310
                        }
×
1311

1312
                        // We fail the upstream HTLCs for all remote pending
1313
                        // outgoing HTLCs as soon as the commitment is
1314
                        // confirmed. The upstream HTLCs for outgoing dust
1315
                        // HTLCs have already been resolved before we reach
1316
                        // this point.
1317
                        getIdx := func(htlc channeldb.HTLC) uint64 {
8✔
1318
                                return htlc.HtlcIndex
4✔
1319
                        }
4✔
1320
                        remoteDangling := fn.NewSet(fn.Map(
4✔
1321
                                getIdx, htlcActions[HtlcFailDanglingAction],
4✔
1322
                        )...)
4✔
1323
                        err := c.abandonForwards(remoteDangling)
4✔
1324
                        if err != nil {
4✔
1325
                                return StateError, closeTx, err
×
1326
                        }
×
1327
                }
1328

1329
                // Now that we know we'll need to act, we'll process all the
1330
                // resolvers, then create the structures we need to resolve all
1331
                // outstanding contracts.
1332
                resolvers, err := c.prepContractResolutions(
4✔
1333
                        contractResolutions, triggerHeight, htlcActions,
4✔
1334
                )
4✔
1335
                if err != nil {
4✔
1336
                        log.Errorf("ChannelArbitrator(%v): unable to "+
×
1337
                                "resolve contracts: %v", c.cfg.ChanPoint, err)
×
1338
                        return StateError, closeTx, err
×
1339
                }
×
1340

1341
                log.Debugf("ChannelArbitrator(%v): inserting %v contract "+
4✔
1342
                        "resolvers", c.cfg.ChanPoint, len(resolvers))
4✔
1343

4✔
1344
                err = c.log.InsertUnresolvedContracts(nil, resolvers...)
4✔
1345
                if err != nil {
4✔
1346
                        return StateError, closeTx, err
×
1347
                }
×
1348

1349
                // Finally, we'll launch all the required contract resolvers.
1350
                // Once they're all resolved, we're no longer needed.
1351
                c.launchResolvers(resolvers, false)
4✔
1352

4✔
1353
                nextState = StateWaitingFullResolution
4✔
1354

1355
        // We'll keep returning this state on every pass until
1356
        // all contracts are fully resolved.
1357
        case StateWaitingFullResolution:
4✔
1358
                log.Infof("ChannelArbitrator(%v): still awaiting contract "+
4✔
1359
                        "resolution", c.cfg.ChanPoint)
4✔
1360

4✔
1361
                unresolved, err := c.log.FetchUnresolvedContracts()
4✔
1362
                if err != nil {
4✔
1363
                        return StateError, closeTx, err
×
1364
                }
×
1365

1366
                // If we have no unresolved contracts, then we can move to the
1367
                // final state.
1368
                if len(unresolved) == 0 {
8✔
1369
                        nextState = StateFullyResolved
4✔
1370
                        break
4✔
1371
                }
1372

1373
                // Otherwise, we still have unresolved contracts, so we'll
1374
                // stay alive to oversee their resolution.
1375
                nextState = StateWaitingFullResolution
4✔
1376

4✔
1377
                // Add debug logs.
4✔
1378
                for _, r := range unresolved {
8✔
1379
                        log.Debugf("ChannelArbitrator(%v): still have "+
4✔
1380
                                "unresolved contract: %T", c.cfg.ChanPoint, r)
4✔
1381
                }
4✔
1382

1383
        // If we start as fully resolved, then we'll end as fully resolved.
1384
        case StateFullyResolved:
4✔
1385
                // To ensure that the state of the contract in persistent
4✔
1386
                // storage is properly reflected, we'll mark the contract as
4✔
1387
                // fully resolved now.
4✔
1388
                nextState = StateFullyResolved
4✔
1389

4✔
1390
                log.Infof("ChannelPoint(%v) has been fully resolved "+
4✔
1391
                        "on-chain at height=%v", c.cfg.ChanPoint, triggerHeight)
4✔
1392

4✔
1393
                if err := c.cfg.MarkChannelResolved(); err != nil {
4✔
1394
                        log.Errorf("unable to mark channel resolved: %v", err)
×
1395
                        return StateError, closeTx, err
×
1396
                }
×
1397
        }
1398

1399
        log.Tracef("ChannelArbitrator(%v): next_state=%v", c.cfg.ChanPoint,
4✔
1400
                nextState)
4✔
1401

4✔
1402
        return nextState, closeTx, nil
4✔
1403
}
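
// Editor's note: the following is an illustrative sketch, not part of lnd.
// The broadcast step in StateBroadcastCommit above persists the commitment
// via MarkCommitmentBroadcasted before publishing, and then treats
// lnwallet.ErrDoubleSpend and lnwallet.ErrMempoolFee as non-fatal, relying
// on the rebroadcaster to keep re-publishing until the transaction confirms
// or is replaced. The hypothetical helper below captures that decision.
func isNonFatalPublishErr(err error) bool {
        // A nil error is trivially non-fatal.
        if err == nil {
                return true
        }

        // The transaction is already known to the backend (published before
        // or by the peer), or it currently pays too little fee to enter the
        // mempool. In both cases we rely on re-publishing instead of failing
        // the state machine.
        return errors.Is(err, lnwallet.ErrDoubleSpend) ||
                errors.Is(err, lnwallet.ErrMempoolFee)
}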
1404

1405
// sweepAnchors offers all given anchor resolutions to the sweeper. It requests
1406
// sweeping at the minimum fee rate. This fee rate can be upped manually by the
1407
// user via the BumpFee rpc.
1408
func (c *ChannelArbitrator) sweepAnchors(anchors *lnwallet.AnchorResolutions,
1409
        heightHint uint32) error {
4✔
1410

4✔
1411
        // Update the set of activeHTLCs so that the sweeping routine has an
4✔
1412
        // up-to-date view of the set of commitments.
4✔
1413
        c.updateActiveHTLCs()
4✔
1414

4✔
1415
        // Prepare the sweeping requests for all possible versions of
4✔
1416
        // commitments.
4✔
1417
        sweepReqs, err := c.prepareAnchorSweeps(heightHint, anchors)
4✔
1418
        if err != nil {
4✔
1419
                return err
×
1420
        }
×
1421

1422
        // Send out the sweeping requests to the sweeper.
1423
        for _, req := range sweepReqs {
8✔
1424
                _, err = c.cfg.Sweeper.SweepInput(req.input, req.params)
4✔
1425
                if err != nil {
4✔
1426
                        return err
×
1427
                }
×
1428
        }
1429

1430
        return nil
4✔
1431
}
1432

1433
// findCommitmentDeadlineAndValue finds the deadline (relative block height)
1434
// for a commitment transaction by extracting the minimum CLTV from its HTLCs.
1435
// From our PoV, the deadline delta is defined to be the smaller of,
1436
//   - half of the least CLTV from outgoing HTLCs' corresponding incoming
1437
//     HTLCs,  or,
1438
//   - half of the least CLTV from incoming HTLCs if the preimage is available.
1439
//
1440
// We use half of the CLTV value to ensure that we have enough time to sweep
1441
// the second-level HTLCs.
1442
//
1443
// It also finds the total value that is time-sensitive, which is the sum of
1444
// all the outgoing HTLCs plus incoming HTLCs whose preimages are known. It
1445
// then returns the value left after subtracting the budget used for sweeping
1446
// the time-sensitive HTLCs.
1447
//
1448
// NOTE: when the deadline turns out to be 0 blocks, we will replace it with 1
1449
// block because our fee estimator doesn't allow a 0 conf target. This also
1450
// means we've fallen behind and should increase our fee to get the transaction
1451
// confirmed asap.
1452
func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
1453
        htlcs htlcSet) (fn.Option[int32], btcutil.Amount, error) {
4✔
1454

4✔
1455
        deadlineMinHeight := uint32(math.MaxUint32)
4✔
1456
        totalValue := btcutil.Amount(0)
4✔
1457

4✔
1458
        // First, iterate through the outgoingHTLCs to find the lowest CLTV
4✔
1459
        // value.
4✔
1460
        for _, htlc := range htlcs.outgoingHTLCs {
8✔
1461
                // Skip if the HTLC is dust.
4✔
1462
                if htlc.OutputIndex < 0 {
8✔
1463
                        log.Debugf("ChannelArbitrator(%v): skipped deadline "+
4✔
1464
                                "for dust htlc=%x",
4✔
1465
                                c.cfg.ChanPoint, htlc.RHash[:])
4✔
1466

4✔
1467
                        continue
4✔
1468
                }
1469

1470
                value := htlc.Amt.ToSatoshis()
4✔
1471

4✔
1472
                // Find the expiry height for this outgoing HTLC's incoming
4✔
1473
                // HTLC.
4✔
1474
                deadlineOpt := c.cfg.FindOutgoingHTLCDeadline(htlc)
4✔
1475

4✔
1476
                // The deadline defaults to the current deadlineMinHeight,
4✔
1477
                // and is overwritten when the option is not None.
4✔
1478
                deadline := deadlineMinHeight
4✔
1479
                deadlineOpt.WhenSome(func(d int32) {
8✔
1480
                        deadline = uint32(d)
4✔
1481

4✔
1482
                        // We only consider the value to be under protection when
4✔
1483
                        // it's time-sensitive.
4✔
1484
                        totalValue += value
4✔
1485
                })
4✔
1486

1487
                if deadline < deadlineMinHeight {
8✔
1488
                        deadlineMinHeight = deadline
4✔
1489

4✔
1490
                        log.Tracef("ChannelArbitrator(%v): outgoing HTLC has "+
4✔
1491
                                "deadline=%v, value=%v", c.cfg.ChanPoint,
4✔
1492
                                deadlineMinHeight, value)
4✔
1493
                }
4✔
1494
        }
1495

1496
        // Then go through the incomingHTLCs and update the minHeight when the
1497
        // conditions are met.
1498
        for _, htlc := range htlcs.incomingHTLCs {
8✔
1499
                // Skip if the HTLC is dust.
4✔
1500
                if htlc.OutputIndex < 0 {
4✔
1501
                        log.Debugf("ChannelArbitrator(%v): skipped deadline "+
×
1502
                                "for dust htlc=%x",
×
1503
                                c.cfg.ChanPoint, htlc.RHash[:])
×
1504

×
1505
                        continue
×
1506
                }
1507

1508
                // Since it's an HTLC sent to us, check if we have preimage for
1509
                // this HTLC.
1510
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
4✔
1511
                if err != nil {
4✔
1512
                        return fn.None[int32](), 0, err
×
1513
                }
×
1514

1515
                if !preimageAvailable {
8✔
1516
                        continue
4✔
1517
                }
1518

1519
                value := htlc.Amt.ToSatoshis()
4✔
1520
                totalValue += value
4✔
1521

4✔
1522
                if htlc.RefundTimeout < deadlineMinHeight {
8✔
1523
                        deadlineMinHeight = htlc.RefundTimeout
4✔
1524

4✔
1525
                        log.Tracef("ChannelArbitrator(%v): incoming HTLC has "+
4✔
1526
                                "deadline=%v, amt=%v", c.cfg.ChanPoint,
4✔
1527
                                deadlineMinHeight, value)
4✔
1528
                }
4✔
1529
        }
1530

1531
        // Calculate the deadline. There are two cases to be handled here,
1532
        //   - when the deadlineMinHeight never gets updated, which could
1533
        //     happen when we have no outgoing HTLCs, and, for incoming HTLCs,
1534
        //       * either we have none, or,
1535
        //       * none of the HTLCs are preimageAvailable.
1536
        //   - when our deadlineMinHeight is no greater than the heightHint,
1537
        //     which means we are behind our schedule.
1538
        var deadline uint32
4✔
1539
        switch {
4✔
1540
        // When we couldn't find a deadline height from our HTLCs, we will fall
1541
        // back to the default value as there's no time pressure here.
1542
        case deadlineMinHeight == math.MaxUint32:
4✔
1543
                return fn.None[int32](), 0, nil
4✔
1544

1545
        // When the deadline is passed, we will fall back to the smallest conf
1546
        // target (1 block).
1547
        case deadlineMinHeight <= heightHint:
×
1548
                log.Warnf("ChannelArbitrator(%v): deadline is passed with "+
×
1549
                        "deadlineMinHeight=%d, heightHint=%d",
×
1550
                        c.cfg.ChanPoint, deadlineMinHeight, heightHint)
×
1551
                deadline = 1
×
1552

1553
        // Use half of the deadline delta, and leave the other half to be used
1554
        // to sweep the HTLCs.
1555
        default:
4✔
1556
                deadline = (deadlineMinHeight - heightHint) / 2
4✔
1557
        }
1558

1559
        // Calculate the value left after subtracting the budget used for
1560
        // sweeping the time-sensitive HTLCs.
1561
        valueLeft := totalValue - calculateBudget(
4✔
1562
                totalValue, c.cfg.Budget.DeadlineHTLCRatio,
4✔
1563
                c.cfg.Budget.DeadlineHTLC,
4✔
1564
        )
4✔
1565

4✔
1566
        log.Debugf("ChannelArbitrator(%v): calculated valueLeft=%v, "+
4✔
1567
                "deadline=%d, using deadlineMinHeight=%d, heightHint=%d",
4✔
1568
                c.cfg.ChanPoint, valueLeft, deadline, deadlineMinHeight,
4✔
1569
                heightHint)
4✔
1570

4✔
1571
        return fn.Some(int32(deadline)), valueLeft, nil
4✔
1572
}
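
// Editor's note: the following is an illustrative sketch, not part of lnd.
// A small worked example of the deadline calculation above: with
// heightHint=1000 and the most urgent HTLC expiring at height 1144, the
// returned conf target is (1144-1000)/2 = 72 blocks, leaving the other half
// of the window for sweeping the resulting second-level HTLC.
func exampleDeadlineDelta() int32 {
        const (
                heightHint        = uint32(1000)
                deadlineMinHeight = uint32(1144)
        )

        // Mirrors the default branch of the switch statement above.
        return int32((deadlineMinHeight - heightHint) / 2)
}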
1573

1574
// launchResolvers updates the activeResolvers list and starts the resolvers.
1575
func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver,
1576
        immediate bool) {
4✔
1577

4✔
1578
        c.activeResolversLock.Lock()
4✔
1579
        defer c.activeResolversLock.Unlock()
4✔
1580

4✔
1581
        c.activeResolvers = resolvers
4✔
1582
        for _, contract := range resolvers {
8✔
1583
                c.wg.Add(1)
4✔
1584
                go c.resolveContract(contract, immediate)
4✔
1585
        }
4✔
1586
}
1587

1588
// advanceState is the main driver of our state machine. This method is an
1589
// iterative function which repeatedly attempts to advance the internal state
1590
// of the channel arbitrator. The state will be advanced until we reach a
1591
// redundant transition, meaning that the state transition is a noop. It
1592
// returns the final state reached, along with the force close transaction
1593
// (if any) that was broadcast while advancing.
1594
func (c *ChannelArbitrator) advanceState(
1595
        triggerHeight uint32, trigger transitionTrigger,
1596
        confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {
4✔
1597

4✔
1598
        var (
4✔
1599
                priorState   ArbitratorState
4✔
1600
                forceCloseTx *wire.MsgTx
4✔
1601
        )
4✔
1602

4✔
1603
        // We'll continue to advance our state forward until the state we
4✔
1604
        // transition to is that same state that we started at.
4✔
1605
        for {
8✔
1606
                priorState = c.state
4✔
1607
                log.Debugf("ChannelArbitrator(%v): attempting state step with "+
4✔
1608
                        "trigger=%v from state=%v", c.cfg.ChanPoint, trigger,
4✔
1609
                        priorState)
4✔
1610

4✔
1611
                nextState, closeTx, err := c.stateStep(
4✔
1612
                        triggerHeight, trigger, confCommitSet,
4✔
1613
                )
4✔
1614
                if err != nil {
8✔
1615
                        log.Errorf("ChannelArbitrator(%v): unable to advance "+
4✔
1616
                                "state: %v", c.cfg.ChanPoint, err)
4✔
1617
                        return priorState, nil, err
4✔
1618
                }
4✔
1619

1620
                if forceCloseTx == nil && closeTx != nil {
8✔
1621
                        forceCloseTx = closeTx
4✔
1622
                }
4✔
1623

1624
                // Our termination transition is a noop transition. If we get
1625
                // our prior state back as the next state, then we'll
1626
                // terminate.
1627
                if nextState == priorState {
8✔
1628
                        log.Debugf("ChannelArbitrator(%v): terminating at "+
4✔
1629
                                "state=%v", c.cfg.ChanPoint, nextState)
4✔
1630
                        return nextState, forceCloseTx, nil
4✔
1631
                }
4✔
1632

1633
                // As the prior state was successfully executed, we can now
1634
                // commit the next state. This ensures that we will re-execute
1635
                // the prior state if anything fails.
1636
                if err := c.log.CommitState(nextState); err != nil {
4✔
1637
                        log.Errorf("ChannelArbitrator(%v): unable to commit "+
×
1638
                                "next state(%v): %v", c.cfg.ChanPoint,
×
1639
                                nextState, err)
×
1640
                        return priorState, nil, err
×
1641
                }
×
1642
                c.state = nextState
4✔
1643
        }
1644
}
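
// Editor's note: the following is an illustrative sketch, not part of lnd.
// advanceState above is a fixpoint loop: it keeps applying stateStep and
// committing the result until a step returns the state it started from. The
// same pattern in isolation, with hypothetical step/commit callbacks:
func advanceUntilNoop(start ArbitratorState,
        step func(ArbitratorState) (ArbitratorState, error),
        commit func(ArbitratorState) error) (ArbitratorState, error) {

        state := start
        for {
                next, err := step(state)
                if err != nil {
                        return state, err
                }

                // A noop transition terminates the loop.
                if next == state {
                        return state, nil
                }

                // Persist the transition before adopting it, so a restart
                // re-executes the prior state instead of skipping it.
                if err := commit(next); err != nil {
                        return state, err
                }

                state = next
        }
}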
1645

1646
// ChainAction is an enum that encompasses all possible on-chain actions
1647
// we'll take for a set of HTLC's.
1648
type ChainAction uint8
1649

1650
const (
1651
        // NoAction is the min chainAction type, indicating that no action
1652
        // needs to be taken for a given HTLC.
1653
        NoAction ChainAction = 0
1654

1655
        // HtlcTimeoutAction indicates that the HTLC will timeout soon. As a
1656
        // result, we should get ready to sweep it on chain after the timeout.
1657
        HtlcTimeoutAction = 1
1658

1659
        // HtlcClaimAction indicates that we should claim the HTLC on chain
1660
        // before its timeout period.
1661
        HtlcClaimAction = 2
1662

1663
        // HtlcFailDustAction indicates that we should fail the upstream HTLC
1664
        // for an outgoing dust HTLC immediately (even before the commitment
1665
        // transaction is confirmed) because it has no output on the commitment
1666
        // transaction. This also includes remote pending outgoing dust HTLCs.
1667
        HtlcFailDustAction = 3
1668

1669
        // HtlcOutgoingWatchAction indicates that we can't yet timeout this
1670
        // HTLC, but we had to go to chain in order to resolve an existing
1671
        // HTLC.  In this case, we'll either: time it out once it expires, or
1672
        // will learn the pre-image if the remote party claims the output. In
1673
        // this case, we'll add the pre-image to our global store.
1674
        HtlcOutgoingWatchAction = 4
1675

1676
        // HtlcIncomingWatchAction indicates that we don't yet have the
1677
        // pre-image to claim the incoming HTLC, but we had to go to chain in order
1678
        // to resolve an existing HTLC. In this case, we'll either: let the
1679
        // other party time it out, or eventually learn of the pre-image, in
1680
        // which case we'll claim on chain.
1681
        HtlcIncomingWatchAction = 5
1682

1683
        // HtlcIncomingDustFinalAction indicates that we should mark an incoming
1684
        // dust htlc as final because it can't be claimed on-chain.
1685
        HtlcIncomingDustFinalAction = 6
1686

1687
        // HtlcFailDanglingAction indicates that we should fail the upstream
1688
        // HTLC for an outgoing HTLC immediately after the commitment
1689
        // transaction has confirmed because it has no corresponding output on
1690
        // the commitment transaction. This category does NOT include any dust
1691
        // HTLCs which are mapped in the "HtlcFailDustAction" category.
1692
        HtlcFailDanglingAction = 7
1693
)
1694

1695
// String returns a human readable string describing a chain action.
1696
func (c ChainAction) String() string {
×
1697
        switch c {
×
1698
        case NoAction:
×
1699
                return "NoAction"
×
1700

1701
        case HtlcTimeoutAction:
×
1702
                return "HtlcTimeoutAction"
×
1703

1704
        case HtlcClaimAction:
×
1705
                return "HtlcClaimAction"
×
1706

1707
        case HtlcFailDustAction:
×
1708
                return "HtlcFailDustAction"
×
1709

1710
        case HtlcOutgoingWatchAction:
×
1711
                return "HtlcOutgoingWatchAction"
×
1712

1713
        case HtlcIncomingWatchAction:
×
1714
                return "HtlcIncomingWatchAction"
×
1715

1716
        case HtlcIncomingDustFinalAction:
×
1717
                return "HtlcIncomingDustFinalAction"
×
1718

1719
        case HtlcFailDanglingAction:
×
1720
                return "HtlcFailDanglingAction"
×
1721

1722
        default:
×
1723
                return "<unknown action>"
×
1724
        }
1725
}
1726

1727
// ChainActionMap is a map of a chain action, to the set of HTLC's that need to
1728
// be acted upon for a given action type.
1729
type ChainActionMap map[ChainAction][]channeldb.HTLC
1730

1731
// Merge merges the passed chain actions with the target chain action map.
1732
func (c ChainActionMap) Merge(actions ChainActionMap) {
4✔
1733
        for chainAction, htlcs := range actions {
8✔
1734
                c[chainAction] = append(c[chainAction], htlcs...)
4✔
1735
        }
4✔
1736
}
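
// Editor's note: the following is an illustrative sketch, not part of lnd.
// Merge simply appends the per-action HTLC slices, so actions computed in
// separate passes (e.g. local commitment vs. remote dangling HTLCs)
// accumulate under the same key:
func exampleMergeChainActions(local, remote ChainActionMap) ChainActionMap {
        // Start from an empty map and fold both sources into it.
        merged := make(ChainActionMap)
        merged.Merge(local)
        merged.Merge(remote)

        // merged[HtlcFailDustAction] now holds the dust HTLCs from both
        // inputs, in insertion order.
        return merged
}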
1737

1738
// shouldGoOnChain takes into account the absolute timeout of the HTLC, if the
1739
// confirmation delta that we need is close, and returns a bool indicating if
1740
// we should go on chain to claim.  We do this rather than waiting up until the
1741
// last minute as we want to ensure that when we *need* (HTLC is timed out) to
1742
// sweep, the commitment is already confirmed.
1743
func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC,
1744
        broadcastDelta, currentHeight uint32) bool {
4✔
1745

4✔
1746
        // We'll calculate the broadcast cut off for this HTLC. This is the
4✔
1747
        // height that (based on our current fee estimation) we should
4✔
1748
        // broadcast in order to ensure the commitment transaction is confirmed
4✔
1749
        // before the HTLC fully expires.
4✔
1750
        broadcastCutOff := htlc.RefundTimeout - broadcastDelta
4✔
1751

4✔
1752
        log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+
4✔
1753
                "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout,
4✔
1754
                broadcastCutOff, currentHeight)
4✔
1755

4✔
1756
        // TODO(roasbeef): take into account default HTLC delta, don't need to
4✔
1757
        // broadcast immediately
4✔
1758
        //  * can then batch with SINGLE | ANYONECANPAY
4✔
1759

4✔
1760
        // We should go on-chain for this HTLC iff we're within our broadcast
4✔
1761
        // cutoff window.
4✔
1762
        if currentHeight < broadcastCutOff {
8✔
1763
                return false
4✔
1764
        }
4✔
1765

1766
        // In case of incoming htlc we should go to chain.
1767
        if htlc.Incoming {
8✔
1768
                return true
4✔
1769
        }
4✔
1770

1771
        // For HTLCs that are the result of payments we initiated, we give a grace
1772
        // period before force closing the channel. During this time we expect
1773
        // both nodes to connect and give the other node a chance to send its
1774
        // updates and cancel the htlc.
1775
        // This shouldn't add any security risk as there is no incoming htlc to
1776
        // fulfill in this case, and the expectation is that when the channel is
1777
        // active the other node will send update_fail_htlc to remove the htlc
1778
        // without closing the channel. It is up to the user to force close the
1779
        // channel if the peer misbehaves and doesn't send the update_fail_htlc.
1780
        // This is useful when this node is offline most of the time and is
1781
        // likely to miss the time slot where the htlc may be cancelled.
1782
        isForwarded := c.cfg.IsForwardedHTLC(c.cfg.ShortChanID, htlc.HtlcIndex)
4✔
1783
        upTime := c.cfg.Clock.Now().Sub(c.startTimestamp)
4✔
1784
        return isForwarded || upTime > c.cfg.PaymentsExpirationGracePeriod
4✔
1785
}
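
// Editor's note: the following is an illustrative sketch, not part of lnd.
// The cutoff test above in numbers: with RefundTimeout=500 and a
// broadcastDelta of 10, the commitment should be broadcast once the chain
// reaches height 490, so that it can confirm before the HTLC expires.
func exampleBroadcastCutoff(refundTimeout, broadcastDelta,
        currentHeight uint32) bool {

        broadcastCutOff := refundTimeout - broadcastDelta

        // Matches the first check in shouldGoOnChain: below the cutoff we
        // stay patient, at or above it we consider going on-chain.
        return currentHeight >= broadcastCutOff
}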
1786

1787
// checkCommitChainActions is called for each new block connected to the end of
1788
// the main chain. Given the new block height, this new method will examine all
1789
// active HTLC's, and determine if we need to go on-chain to claim any of them.
1790
// A map of action -> []htlc is returned, detailing what action (if any) should
1791
// be performed for each HTLC. For timed out HTLC's, once the commitment has
1792
// been sufficiently confirmed, the HTLC's should be canceled backwards. For
1793
// redeemed HTLC's, we should send the pre-image back to the incoming link.
1794
func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
1795
        trigger transitionTrigger, htlcs htlcSet) (ChainActionMap, error) {
4✔
1796

4✔
1797
        // TODO(roasbeef): would need to lock channel? channel totem?
4✔
1798
        //  * race condition if adding and we broadcast, etc
4✔
1799
        //  * or would make each instance sync?
4✔
1800

4✔
1801
        log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+
4✔
1802
                "height=%v, in_htlc_count=%v, out_htlc_count=%v",
4✔
1803
                c.cfg.ChanPoint, height,
4✔
1804
                len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs))
4✔
1805

4✔
1806
        actionMap := make(ChainActionMap)
4✔
1807

4✔
1808
        // First, we'll make an initial pass over the set of incoming and
4✔
1809
        // outgoing HTLC's to decide if we need to go on chain at all.
4✔
1810
        haveChainActions := false
4✔
1811
        for _, htlc := range htlcs.outgoingHTLCs {
8✔
1812
                // We'll need to go on-chain for an outgoing HTLC if it was
4✔
1813
                // never resolved downstream, and it's "close" to timing out.
4✔
1814
                //
4✔
1815
                // TODO(yy): If there's no corresponding incoming HTLC, it
4✔
1816
                // means we are the first hop, hence the payer. This is a
4✔
1817
                // tricky case - unlike a forwarding hop, we don't have an
4✔
1818
                // incoming HTLC that will time out, which means as long as we
4✔
1819
                // can learn the preimage, we can settle the invoice (before it
4✔
1820
                // expires?).
4✔
1821
                toChain := c.shouldGoOnChain(
4✔
1822
                        htlc, c.cfg.OutgoingBroadcastDelta, height,
4✔
1823
                )
4✔
1824

4✔
1825
                if toChain {
8✔
1826
                        // Convert to int64 in case of overflow.
4✔
1827
                        remainingBlocks := int64(htlc.RefundTimeout) -
4✔
1828
                                int64(height)
4✔
1829

4✔
1830
                        log.Infof("ChannelArbitrator(%v): go to chain for "+
4✔
1831
                                "outgoing htlc %x: timeout=%v, amount=%v, "+
4✔
1832
                                "blocks_until_expiry=%v, broadcast_delta=%v",
4✔
1833
                                c.cfg.ChanPoint, htlc.RHash[:],
4✔
1834
                                htlc.RefundTimeout, htlc.Amt, remainingBlocks,
4✔
1835
                                c.cfg.OutgoingBroadcastDelta,
4✔
1836
                        )
4✔
1837
                }
4✔
1838

1839
                haveChainActions = haveChainActions || toChain
4✔
1840
        }
1841

1842
        for _, htlc := range htlcs.incomingHTLCs {
8✔
1843
                // We'll need to go on-chain to pull an incoming HTLC iff we
4✔
1844
                // know the pre-image and it's close to timing out. We need to
4✔
1845
                // ensure that we claim the funds that are rightfully ours
4✔
1846
                // on-chain.
4✔
1847
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
4✔
1848
                if err != nil {
4✔
1849
                        return nil, err
×
1850
                }
×
1851

1852
                if !preimageAvailable {
8✔
1853
                        continue
4✔
1854
                }
1855

1856
                toChain := c.shouldGoOnChain(
4✔
1857
                        htlc, c.cfg.IncomingBroadcastDelta, height,
4✔
1858
                )
4✔
1859

4✔
1860
                if toChain {
8✔
1861
                        // Convert to int64 in case of overflow.
4✔
1862
                        remainingBlocks := int64(htlc.RefundTimeout) -
4✔
1863
                                int64(height)
4✔
1864

4✔
1865
                        log.Infof("ChannelArbitrator(%v): go to chain for "+
4✔
1866
                                "incoming htlc %x: timeout=%v, amount=%v, "+
4✔
1867
                                "blocks_until_expiry=%v, broadcast_delta=%v",
4✔
1868
                                c.cfg.ChanPoint, htlc.RHash[:],
4✔
1869
                                htlc.RefundTimeout, htlc.Amt, remainingBlocks,
4✔
1870
                                c.cfg.IncomingBroadcastDelta,
4✔
1871
                        )
4✔
1872
                }
4✔
1873

1874
                haveChainActions = haveChainActions || toChain
4✔
1875
        }
1876

1877
        // If we don't have any actions to make, then we'll return an empty
1878
        // action map. We only do this if this was a chain trigger though, as
1879
        // if we're going to broadcast the commitment (or the remote party did)
1880
        // we're *forced* to act on each HTLC.
1881
        if !haveChainActions && trigger == chainTrigger {
8✔
1882
                log.Tracef("ChannelArbitrator(%v): no actions to take at "+
4✔
1883
                        "height=%v", c.cfg.ChanPoint, height)
4✔
1884
                return actionMap, nil
4✔
1885
        }
4✔
1886

1887
        // Now that we know we'll need to go on-chain, we'll examine all of our
1888
        // active outgoing HTLC's to see if we either need to: sweep them after
1889
        // a timeout (then cancel backwards), cancel them backwards
1890
        // immediately, or watch them as they're still active contracts.
1891
        for _, htlc := range htlcs.outgoingHTLCs {
8✔
1892
                switch {
4✔
1893
                // If the HTLC is dust, then we can cancel it backwards
1894
                // immediately as there's no matching contract to arbitrate
1895
                // on-chain. We know the HTLC is dust if the OutputIndex is
1896
                // negative.
1897
                case htlc.OutputIndex < 0:
4✔
1898
                        log.Tracef("ChannelArbitrator(%v): immediately "+
4✔
1899
                                "failing dust htlc=%x", c.cfg.ChanPoint,
4✔
1900
                                htlc.RHash[:])
4✔
1901

4✔
1902
                        actionMap[HtlcFailDustAction] = append(
4✔
1903
                                actionMap[HtlcFailDustAction], htlc,
4✔
1904
                        )
4✔
1905

1906
                // If we don't need to immediately act on this HTLC, then we'll
1907
                // mark it still "live". After we broadcast, we'll monitor it
1908
                // until the HTLC times out to see if we can also redeem it
1909
                // on-chain.
1910
                case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
1911
                        height,
1912
                ):
4✔
1913
                        // TODO(roasbeef): also need to be able to query
4✔
1914
                        // circuit map to see if HTLC hasn't been fully
4✔
1915
                        // resolved
4✔
1916
                        //
4✔
1917
                        //  * can't fail incoming until if outgoing not yet
4✔
1918
                        //  failed
4✔
1919

4✔
1920
                        log.Tracef("ChannelArbitrator(%v): watching chain to "+
4✔
1921
                                "decide action for outgoing htlc=%x",
4✔
1922
                                c.cfg.ChanPoint, htlc.RHash[:])
4✔
1923

4✔
1924
                        actionMap[HtlcOutgoingWatchAction] = append(
4✔
1925
                                actionMap[HtlcOutgoingWatchAction], htlc,
4✔
1926
                        )
4✔
1927

1928
                // Otherwise, we'll update our actionMap to mark that we need
1929
                // to sweep this HTLC on-chain
1930
                default:
4✔
1931
                        log.Tracef("ChannelArbitrator(%v): going on-chain to "+
4✔
1932
                                "timeout htlc=%x", c.cfg.ChanPoint, htlc.RHash[:])
4✔
1933

4✔
1934
                        actionMap[HtlcTimeoutAction] = append(
4✔
1935
                                actionMap[HtlcTimeoutAction], htlc,
4✔
1936
                        )
4✔
1937
                }
1938
        }
1939

1940
        // Similarly, for each incoming HTLC, now that we need to go on-chain,
1941
        // we'll either: sweep it immediately if we know the pre-image, or
1942
        // observe the output on-chain if we don't. In this last case, we'll
1943
        // either learn of it eventually from the outgoing HTLC, or the sender
1944
        // will time out the HTLC.
1945
        for _, htlc := range htlcs.incomingHTLCs {
8✔
1946
                // If the HTLC is dust, there is no action to be taken.
4✔
1947
                if htlc.OutputIndex < 0 {
8✔
1948
                        log.Debugf("ChannelArbitrator(%v): no resolution "+
4✔
1949
                                "needed for incoming dust htlc=%x",
4✔
1950
                                c.cfg.ChanPoint, htlc.RHash[:])
4✔
1951

4✔
1952
                        actionMap[HtlcIncomingDustFinalAction] = append(
4✔
1953
                                actionMap[HtlcIncomingDustFinalAction], htlc,
4✔
1954
                        )
4✔
1955

4✔
1956
                        continue
4✔
1957
                }
1958

1959
                log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
4✔
1960
                        "action for incoming htlc=%x", c.cfg.ChanPoint,
4✔
1961
                        htlc.RHash[:])
4✔
1962

4✔
1963
                actionMap[HtlcIncomingWatchAction] = append(
4✔
1964
                        actionMap[HtlcIncomingWatchAction], htlc,
4✔
1965
                )
4✔
1966
        }
1967

1968
        return actionMap, nil
4✔
1969
}
1970

1971
// isPreimageAvailable returns whether the hash preimage is available in either
1972
// the preimage cache or the invoice database.
1973
func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool,
1974
        error) {
4✔
1975

4✔
1976
        // Start by checking the preimage cache for preimages of
4✔
1977
        // forwarded HTLCs.
4✔
1978
        _, preimageAvailable := c.cfg.PreimageDB.LookupPreimage(
4✔
1979
                hash,
4✔
1980
        )
4✔
1981
        if preimageAvailable {
8✔
1982
                return true, nil
4✔
1983
        }
4✔
1984

1985
        // Then check if we have an invoice that can be settled by this HTLC.
1986
        //
1987
        // TODO(joostjager): Check that there are still more blocks remaining
1988
        // than the invoice cltv delta. We don't want to go to chain only to
1989
        // have the incoming contest resolver decide that we don't want to
1990
        // settle this invoice.
1991
        invoice, err := c.cfg.Registry.LookupInvoice(context.Background(), hash)
4✔
1992
        switch {
4✔
1993
        case err == nil:
4✔
1994
        case errors.Is(err, invoices.ErrInvoiceNotFound) ||
1995
                errors.Is(err, invoices.ErrNoInvoicesCreated):
4✔
1996

4✔
1997
                return false, nil
4✔
1998
        default:
×
1999
                return false, err
×
2000
        }
2001

2002
        preimageAvailable = invoice.Terms.PaymentPreimage != nil
4✔
2003

4✔
2004
        return preimageAvailable, nil
4✔
2005
}
2006

2007
// checkLocalChainActions is similar to checkCommitChainActions, but it also
2008
// examines the set of HTLCs on the remote party's commitment. This allows us
2009
// to ensure we're able to satisfy the HTLC timeout constraints for incoming vs
2010
// outgoing HTLCs.
2011
func (c *ChannelArbitrator) checkLocalChainActions(
2012
        height uint32, trigger transitionTrigger,
2013
        activeHTLCs map[HtlcSetKey]htlcSet,
2014
        commitsConfirmed bool) (ChainActionMap, error) {
4✔
2015

4✔
2016
        // First, we'll check our local chain actions as normal. This will only
4✔
2017
        // examine HTLCs on our local commitment (timeout or settle).
4✔
2018
        localCommitActions, err := c.checkCommitChainActions(
4✔
2019
                height, trigger, activeHTLCs[LocalHtlcSet],
4✔
2020
        )
4✔
2021
        if err != nil {
4✔
2022
                return nil, err
×
2023
        }
×
2024

2025
        // Next, we'll examine the remote commitment (and maybe a dangling one)
2026
        // to see if the set difference of our HTLCs is non-empty. If so, then
2027
        // we may need to cancel back some HTLCs if we decide to go to chain.
2028
        remoteDanglingActions := c.checkRemoteDanglingActions(
4✔
2029
                height, activeHTLCs, commitsConfirmed,
4✔
2030
        )
4✔
2031

4✔
2032
        // Finally, we'll merge the two set of chain actions.
4✔
2033
        localCommitActions.Merge(remoteDanglingActions)
4✔
2034

4✔
2035
        return localCommitActions, nil
4✔
2036
}
2037

2038
// checkRemoteDanglingActions examines the set of remote commitments for any
2039
// HTLCs that are close to timing out. If we find any, then we'll return a set
2040
// of chain actions for HTLCs that are on their commitment, but not ours, to
2041
// cancel immediately.
2042
func (c *ChannelArbitrator) checkRemoteDanglingActions(
2043
        height uint32, activeHTLCs map[HtlcSetKey]htlcSet,
2044
        commitsConfirmed bool) ChainActionMap {
4✔
2045

4✔
2046
        var (
4✔
2047
                pendingRemoteHTLCs []channeldb.HTLC
4✔
2048
                localHTLCs         = make(map[uint64]struct{})
4✔
2049
                remoteHTLCs        = make(map[uint64]channeldb.HTLC)
4✔
2050
                actionMap          = make(ChainActionMap)
4✔
2051
        )
4✔
2052

4✔
2053
        // First, we'll construct two sets of the outgoing HTLCs: those on our
4✔
2054
        // local commitment, and those that are on the remote commitment(s).
4✔
2055
        for htlcSetKey, htlcs := range activeHTLCs {
8✔
2056
                if htlcSetKey.IsRemote {
8✔
2057
                        for _, htlc := range htlcs.outgoingHTLCs {
8✔
2058
                                remoteHTLCs[htlc.HtlcIndex] = htlc
4✔
2059
                        }
4✔
2060
                } else {
4✔
2061
                        for _, htlc := range htlcs.outgoingHTLCs {
8✔
2062
                                localHTLCs[htlc.HtlcIndex] = struct{}{}
4✔
2063
                        }
4✔
2064
                }
2065
        }
2066

2067
        // With both sets constructed, we'll now compute the set difference of
2068
        // our two sets of HTLCs. This'll give us the HTLCs that exist on the
2069
        // remote commitment transaction, but not on ours.
2070
        for htlcIndex, htlc := range remoteHTLCs {
8✔
2071
                if _, ok := localHTLCs[htlcIndex]; ok {
8✔
2072
                        continue
4✔
2073
                }
2074

2075
                pendingRemoteHTLCs = append(pendingRemoteHTLCs, htlc)
4✔
2076
        }
2077

2078
        // Finally, we'll examine all the pending remote HTLCs for those that
2079
        // have expired. If we find any, then we'll recommend that they be
2080
        // failed now so we can free up the incoming HTLC.
2081
        for _, htlc := range pendingRemoteHTLCs {
8✔
2082
                // We'll now check if we need to go to chain in order to cancel
4✔
2083
                // the incoming HTLC.
4✔
2084
                goToChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
4✔
2085
                        height,
4✔
2086
                )
4✔
2087

4✔
2088
                // If we don't need to go to chain, and no commitments have
4✔
2089
                // been confirmed, then we can move on. Otherwise, if
4✔
2090
                // commitments have been confirmed, then we need to cancel back
4✔
2091
                // *all* of the pending remote HTLCS.
4✔
2092
                if !goToChain && !commitsConfirmed {
8✔
2093
                        continue
4✔
2094
                }
2095

2096
                // Dust htlcs can be canceled back even before the commitment
2097
                // transaction confirms. Dust htlcs are not enforceable onchain.
2098
                // If another version of the commit tx were to confirm, we would either
2099
                // gain or lose those dust amounts, but there is no option other than
2100
                // cancelling the incoming HTLC back, because we will never learn
2101
                // the preimage.
2102
                if htlc.OutputIndex < 0 {
4✔
2103
                        log.Infof("ChannelArbitrator(%v): fail dangling dust "+
×
2104
                                "htlc=%x from local/remote commitments diff",
×
2105
                                c.cfg.ChanPoint, htlc.RHash[:])
×
2106

×
2107
                        actionMap[HtlcFailDustAction] = append(
×
2108
                                actionMap[HtlcFailDustAction], htlc,
×
2109
                        )
×
2110

×
2111
                        continue
×
2112
                }
2113

2114
                log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
4✔
2115
                        "local/remote commitments diff",
4✔
2116
                        c.cfg.ChanPoint, htlc.RHash[:])
4✔
2117

4✔
2118
                actionMap[HtlcFailDanglingAction] = append(
4✔
2119
                        actionMap[HtlcFailDanglingAction], htlc,
4✔
2120
                )
4✔
2121
        }
2122

2123
        return actionMap
4✔
2124
}
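
// Editor's note: the following is an illustrative sketch, not part of lnd.
// The core of the dangling-HTLC detection above is a set difference keyed
// by HTLC index: keep every outgoing HTLC that appears on a remote
// commitment but not on our local one. The same computation in isolation:
func exampleRemoteOnlyHTLCs(remote map[uint64]channeldb.HTLC,
        local map[uint64]struct{}) []channeldb.HTLC {

        var remoteOnly []channeldb.HTLC
        for idx, htlc := range remote {
                // Skip HTLCs that also exist on our local commitment.
                if _, ok := local[idx]; ok {
                        continue
                }

                remoteOnly = append(remoteOnly, htlc)
        }

        return remoteOnly
}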
2125

2126
// checkRemoteChainActions examines the two possible remote commitment chains
2127
// and returns the set of chain actions we need to carry out if the remote
2128
// commitment (non pending) confirms. The pendingConf indicates if the pending
2129
// remote commitment confirmed. This is similar to checkCommitChainActions, but
2130
// we'll immediately fail any HTLCs on the pending remote commit, but not the
2131
// remote commit (or the other way around).
2132
func (c *ChannelArbitrator) checkRemoteChainActions(
2133
        height uint32, trigger transitionTrigger,
2134
        activeHTLCs map[HtlcSetKey]htlcSet,
2135
        pendingConf bool) (ChainActionMap, error) {
4✔
2136

4✔
2137
        // First, we'll examine all the normal chain actions on the remote
4✔
2138
        // commitment that confirmed.
4✔
2139
        confHTLCs := activeHTLCs[RemoteHtlcSet]
4✔
2140
        if pendingConf {
4✔
2141
                confHTLCs = activeHTLCs[RemotePendingHtlcSet]
×
2142
        }
×
2143
        remoteCommitActions, err := c.checkCommitChainActions(
4✔
2144
                height, trigger, confHTLCs,
4✔
2145
        )
4✔
2146
        if err != nil {
4✔
2147
                return nil, err
×
2148
        }
×
2149

2150
        // With these actions computed, we'll now check the diff of the HTLCs on
2151
        // the commitments, and cancel back any that are on the pending but not
2152
        // the non-pending.
2153
        remoteDiffActions := c.checkRemoteDiffActions(
4✔
2154
                activeHTLCs, pendingConf,
4✔
2155
        )
4✔
2156

4✔
2157
        // Finally, we'll merge all the chain actions and the final set of
4✔
2158
        // chain actions.
4✔
2159
        remoteCommitActions.Merge(remoteDiffActions)
4✔
2160
        return remoteCommitActions, nil
4✔
2161
}
2162

2163
// checkRemoteDiffActions checks the set difference of the HTLCs on the remote
2164
// confirmed commit and remote pending commit for HTLCs that we need to cancel
2165
// back. If we find any HTLCs on the remote pending but not the remote, then
2166
// we'll mark them to be failed immediately.
2167
func (c *ChannelArbitrator) checkRemoteDiffActions(
2168
        activeHTLCs map[HtlcSetKey]htlcSet,
2169
        pendingConf bool) ChainActionMap {
4✔
2170

4✔
2171
        // First, we'll partition the HTLCs into those that are present on the
4✔
2172
        // confirmed commitment, and those on the dangling commitment.
4✔
2173
        confHTLCs := activeHTLCs[RemoteHtlcSet]
4✔
2174
        danglingHTLCs := activeHTLCs[RemotePendingHtlcSet]
4✔
2175
        if pendingConf {
4✔
2176
                confHTLCs = activeHTLCs[RemotePendingHtlcSet]
×
2177
                danglingHTLCs = activeHTLCs[RemoteHtlcSet]
×
2178
        }
×
2179

2180
        // Next, we'll create a set of all the HTLCs on the confirmed commitment.
2181
        remoteHtlcs := make(map[uint64]struct{})
4✔
2182
        for _, htlc := range confHTLCs.outgoingHTLCs {
8✔
2183
                remoteHtlcs[htlc.HtlcIndex] = struct{}{}
4✔
2184
        }
4✔
2185

2186
        // With the remote HTLCs assembled, we'll mark any HTLCs only on the
2187
        // remote pending commitment to be failed asap.
2188
        actionMap := make(ChainActionMap)
4✔
2189
        for _, htlc := range danglingHTLCs.outgoingHTLCs {
8✔
2190
                if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok {
8✔
2191
                        continue
4✔
2192
                }
2193

2194
                preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
×
2195
                if err != nil {
×
2196
                        log.Errorf("ChannelArbitrator(%v): failed to query "+
×
2197
                                "preimage for dangling htlc=%x from remote "+
×
2198
                                "commitments diff", c.cfg.ChanPoint,
×
2199
                                htlc.RHash[:])
×
2200

×
2201
                        continue
×
2202
                }
2203

2204
                if preimageAvailable {
×
2205
                        continue
×
2206
                }
2207

2208
                // Dust HTLCs on the remote commitment can be failed back.
2209
                if htlc.OutputIndex < 0 {
×
2210
                        log.Infof("ChannelArbitrator(%v): fail dangling dust "+
×
2211
                                "htlc=%x from remote commitments diff",
×
2212
                                c.cfg.ChanPoint, htlc.RHash[:])
×
2213

×
2214
                        actionMap[HtlcFailDustAction] = append(
×
2215
                                actionMap[HtlcFailDustAction], htlc,
×
2216
                        )
×
2217

×
2218
                        continue
×
2219
                }
2220

2221
                actionMap[HtlcFailDanglingAction] = append(
×
2222
                        actionMap[HtlcFailDanglingAction], htlc,
×
2223
                )
×
2224

×
2225
                log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
×
2226
                        "remote commitments diff",
×
2227
                        c.cfg.ChanPoint, htlc.RHash[:])
×
2228
        }
2229

2230
        return actionMap
4✔
2231
}
2232

2233
// constructChainActions returns the set of actions that should be taken for
2234
// confirmed HTLCs at the specified height. Our actions will depend on the set
2235
// of HTLCs that were active across all channels at the time of channel
2236
// closure.
2237
func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
2238
        height uint32, trigger transitionTrigger) (ChainActionMap, error) {
4✔
2239

4✔
2240
        // If we've reached this point and have no confirmed commitment set,
4✔
2241
        // then this is an older node that had a pending close channel before
4✔
2242
        // the CommitSet was introduced. In this case, we'll just return the
4✔
2243
        // existing ChainActionMap they had on disk.
4✔
2244
        if confCommitSet == nil || confCommitSet.ConfCommitKey.IsNone() {
4✔
2245
                return c.log.FetchChainActions()
×
2246
        }
×
2247

2248
        // Otherwise, we have the full commitment set written to disk, and can
2249
        // proceed as normal.
2250
        htlcSets := confCommitSet.toActiveHTLCSets()
4✔
2251
        confCommitKey, err := confCommitSet.ConfCommitKey.UnwrapOrErr(
4✔
2252
                fmt.Errorf("no commitKey available"),
4✔
2253
        )
4✔
2254
        if err != nil {
4✔
2255
                return nil, err
×
2256
        }
×
2257

2258
        switch confCommitKey {
4✔
2259
        // If the local commitment transaction confirmed, then we'll examine
2260
        // that as well as their commitments to build the set of chain actions.
2261
        case LocalHtlcSet:
4✔
2262
                return c.checkLocalChainActions(
4✔
2263
                        height, trigger, htlcSets, true,
4✔
2264
                )
4✔
2265

2266
        // If the remote commitment confirmed, then we'll grab all the chain
2267
        // actions for the remote commit, and check the pending commit for any
2268
        // HTLCS we need to handle immediately (dust).
2269
        case RemoteHtlcSet:
4✔
2270
                return c.checkRemoteChainActions(
4✔
2271
                        height, trigger, htlcSets, false,
4✔
2272
                )
4✔
2273

2274
        // Otherwise, the remote pending commitment confirmed, so we'll examine
2275
        // the HTLCs on that unrevoked dangling commitment.
2276
        case RemotePendingHtlcSet:
×
2277
                return c.checkRemoteChainActions(
×
2278
                        height, trigger, htlcSets, true,
×
2279
                )
×
2280
        }
2281

2282
        return nil, fmt.Errorf("unable to locate chain actions")
×
2283
}
2284

2285
// prepContractResolutions is called either in the case that we decide we need
// to go to chain, or the remote party goes to chain. Given a set of actions we
// need to take for each HTLC, this method will return a set of contract
// resolvers that will resolve the contracts on-chain if needed, and also a set
// of packets to send to the htlcswitch in order to ensure all incoming HTLCs
// are properly resolved.
func (c *ChannelArbitrator) prepContractResolutions(
	contractResolutions *ContractResolutions, height uint32,
	htlcActions ChainActionMap) ([]ContractResolver, error) {

	// We'll also fetch the historical state of this channel, as it should
	// have been marked as closed by now, and supplement it to each resolver
	// such that we can properly resolve our pending contracts.
	var chanState *channeldb.OpenChannel
	chanState, err := c.cfg.FetchHistoricalChannel()
	switch {
	// If we don't find this channel, then it may be the case that it
	// was closed before we started to retain the final state
	// information for open channels.
	case err == channeldb.ErrNoHistoricalBucket:
		fallthrough
	case err == channeldb.ErrChannelNotFound:
		log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
			"state", c.cfg.ChanPoint)

	case err != nil:
		return nil, err
	}

	incomingResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
	outgoingResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs

	// We'll use these two maps to quickly look up an active HTLC with its
	// matching HTLC resolution.
	outResolutionMap := make(map[wire.OutPoint]lnwallet.OutgoingHtlcResolution)
	inResolutionMap := make(map[wire.OutPoint]lnwallet.IncomingHtlcResolution)
	for i := 0; i < len(incomingResolutions); i++ {
		inRes := incomingResolutions[i]
		inResolutionMap[inRes.HtlcPoint()] = inRes
	}
	for i := 0; i < len(outgoingResolutions); i++ {
		outRes := outgoingResolutions[i]
		outResolutionMap[outRes.HtlcPoint()] = outRes
	}

	// We'll create the resolver kit that we'll be cloning for each
	// resolver so they each can do their duty.
	resolverCfg := ResolverConfig{
		ChannelArbitratorConfig: c.cfg,
		Checkpoint: func(res ContractResolver,
			reports ...*channeldb.ResolverReport) error {

			return c.log.InsertUnresolvedContracts(reports, res)
		},
	}

	commitHash := contractResolutions.CommitHash

	var htlcResolvers []ContractResolver

	// We instantiate an anchor resolver if the commitment tx has an
	// anchor.
	if contractResolutions.AnchorResolution != nil {
		anchorResolver := newAnchorResolver(
			contractResolutions.AnchorResolution.AnchorSignDescriptor,
			contractResolutions.AnchorResolution.CommitAnchor,
			height, c.cfg.ChanPoint, resolverCfg,
		)
		anchorResolver.SupplementState(chanState)

		htlcResolvers = append(htlcResolvers, anchorResolver)
	}

	// If this is a breach close, we'll create a breach resolver, determine
	// the HTLCs to fail back, and exit. This is done because the other
	// steps taken for non-breach-closes do not matter for breach-closes.
	if contractResolutions.BreachResolution != nil {
		breachResolver := newBreachResolver(resolverCfg)
		htlcResolvers = append(htlcResolvers, breachResolver)

		return htlcResolvers, nil
	}

	// For each HTLC, we'll either act immediately, meaning we'll instantly
	// fail the HTLC, or we'll act only once the transaction has been
	// confirmed, in which case we'll need an HTLC resolver.
	for htlcAction, htlcs := range htlcActions {
		switch htlcAction {
		// If we can claim this HTLC, we'll create an HTLC resolver to
		// claim the HTLC (second-level or directly), then add the
		// pre-image.
		case HtlcClaimAction:
			for _, htlc := range htlcs {
				htlc := htlc

				htlcOp := wire.OutPoint{
					Hash:  commitHash,
					Index: uint32(htlc.OutputIndex),
				}

				resolution, ok := inResolutionMap[htlcOp]
				if !ok {
					// TODO(roasbeef): panic?
					log.Errorf("ChannelArbitrator(%v) unable to find "+
						"incoming resolution: %v",
						c.cfg.ChanPoint, htlcOp)
					continue
				}

				resolver := newSuccessResolver(
					resolution, height, htlc, resolverCfg,
				)
				if chanState != nil {
					resolver.SupplementState(chanState)
				}
				htlcResolvers = append(htlcResolvers, resolver)
			}

		// If we can timeout the HTLC directly, then we'll create the
		// proper resolver to do so, who will then cancel the packet
		// backwards.
		case HtlcTimeoutAction:
			for _, htlc := range htlcs {
				htlc := htlc

				htlcOp := wire.OutPoint{
					Hash:  commitHash,
					Index: uint32(htlc.OutputIndex),
				}

				resolution, ok := outResolutionMap[htlcOp]
				if !ok {
					log.Errorf("ChannelArbitrator(%v) unable to find "+
						"outgoing resolution: %v", c.cfg.ChanPoint, htlcOp)
					continue
				}

				resolver := newTimeoutResolver(
					resolution, height, htlc, resolverCfg,
				)
				if chanState != nil {
					resolver.SupplementState(chanState)
				}

				// For outgoing HTLCs, we will also need to
				// supplement the resolver with the expiry
				// block height of its corresponding incoming
				// HTLC.
				deadline := c.cfg.FindOutgoingHTLCDeadline(htlc)
				resolver.SupplementDeadline(deadline)

				htlcResolvers = append(htlcResolvers, resolver)
			}

		// If this is an incoming HTLC, but we can't act yet, then
		// we'll create an incoming resolver to redeem the HTLC if we
		// learn of the pre-image, or let the remote party time out.
		case HtlcIncomingWatchAction:
			for _, htlc := range htlcs {
				htlc := htlc

				htlcOp := wire.OutPoint{
					Hash:  commitHash,
					Index: uint32(htlc.OutputIndex),
				}

				// TODO(roasbeef): need to handle incoming dust...

				// TODO(roasbeef): can't be negative!!!
				resolution, ok := inResolutionMap[htlcOp]
				if !ok {
					log.Errorf("ChannelArbitrator(%v) unable to find "+
						"incoming resolution: %v",
						c.cfg.ChanPoint, htlcOp)
					continue
				}

				resolver := newIncomingContestResolver(
					resolution, height, htlc,
					resolverCfg,
				)
				if chanState != nil {
					resolver.SupplementState(chanState)
				}
				htlcResolvers = append(htlcResolvers, resolver)
			}

		// Finally, if this is an outgoing HTLC we've sent, then we'll
		// launch a resolver to watch for the pre-image (and settle
		// backwards), or just timeout.
		case HtlcOutgoingWatchAction:
			for _, htlc := range htlcs {
				htlc := htlc

				htlcOp := wire.OutPoint{
					Hash:  commitHash,
					Index: uint32(htlc.OutputIndex),
				}

				resolution, ok := outResolutionMap[htlcOp]
				if !ok {
					log.Errorf("ChannelArbitrator(%v) "+
						"unable to find outgoing "+
						"resolution: %v",
						c.cfg.ChanPoint, htlcOp)

					continue
				}

				resolver := newOutgoingContestResolver(
					resolution, height, htlc, resolverCfg,
				)
				if chanState != nil {
					resolver.SupplementState(chanState)
				}

				// For outgoing HTLCs, we will also need to
				// supplement the resolver with the expiry
				// block height of its corresponding incoming
				// HTLC.
				deadline := c.cfg.FindOutgoingHTLCDeadline(htlc)
				resolver.SupplementDeadline(deadline)

				htlcResolvers = append(htlcResolvers, resolver)
			}
		}
	}

	// If this was a unilateral closure, then we'll also create a resolver
	// to sweep our commitment output (but only if it wasn't trimmed).
	if contractResolutions.CommitResolution != nil {
		resolver := newCommitSweepResolver(
			*contractResolutions.CommitResolution, height,
			c.cfg.ChanPoint, resolverCfg,
		)
		if chanState != nil {
			resolver.SupplementState(chanState)
		}
		htlcResolvers = append(htlcResolvers, resolver)
	}

	return htlcResolvers, nil
}

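// Editorial sketch (not part of the upstream file): the resolvers returned by
// prepContractResolutions are each driven in their own goroutine via
// resolveContract below. The loop and the `immediate` flag value are
// assumptions about how a caller would launch them, roughly:
//
//	for _, resolver := range htlcResolvers {
//		c.wg.Add(1)
//		go c.resolveContract(resolver, false)
//	}
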
// replaceResolver replaces a resolver in the list of active resolvers. If the
// resolver to be replaced is not found, it returns an error.
func (c *ChannelArbitrator) replaceResolver(oldResolver,
	newResolver ContractResolver) error {

	c.activeResolversLock.Lock()
	defer c.activeResolversLock.Unlock()

	oldKey := oldResolver.ResolverKey()
	for i, r := range c.activeResolvers {
		if bytes.Equal(r.ResolverKey(), oldKey) {
			c.activeResolvers[i] = newResolver
			return nil
		}
	}

	return errors.New("resolver to be replaced not found")
}

// resolveContract is a goroutine tasked with fully resolving an unresolved
// contract. Either the initial contract will be resolved after a single step,
// or the contract will itself create another contract to be resolved. In
// either case, once the contract has been fully resolved, we'll signal back to
// the main goroutine so it can properly keep track of the set of unresolved
// contracts.
//
// NOTE: This MUST be run as a goroutine.
func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver,
	immediate bool) {

	defer c.wg.Done()

	log.Debugf("ChannelArbitrator(%v): attempting to resolve %T",
		c.cfg.ChanPoint, currentContract)

	// Until the contract is fully resolved, we'll continue to iteratively
	// resolve the contract one step at a time.
	for !currentContract.IsResolved() {
		log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved",
			c.cfg.ChanPoint, currentContract)

		select {

		// If we've been signalled to quit, then we'll exit early.
		case <-c.quit:
			return

		default:
			// Otherwise, we'll attempt to resolve the current
			// contract.
			nextContract, err := currentContract.Resolve(immediate)
			if err != nil {
				if err == errResolverShuttingDown {
					return
				}

				log.Errorf("ChannelArbitrator(%v): unable to "+
					"progress %T: %v",
					c.cfg.ChanPoint, currentContract, err)
				return
			}

			switch {
			// If this contract produced another, then this means
			// the current contract was only able to be partially
			// resolved in this step. So we'll do a contract swap
			// within our logs: the new contract will take the
			// place of the old one.
			case nextContract != nil:
				log.Debugf("ChannelArbitrator(%v): swapping "+
					"out contract %T for %T ",
					c.cfg.ChanPoint, currentContract,
					nextContract)

				// Swap contract in log.
				err := c.log.SwapContract(
					currentContract, nextContract,
				)
				if err != nil {
					log.Errorf("unable to add recurse "+
						"contract: %v", err)
				}

				// Swap contract in resolvers list. This is to
				// make sure that reports are queried from the
				// new resolver.
				err = c.replaceResolver(
					currentContract, nextContract,
				)
				if err != nil {
					log.Errorf("unable to replace "+
						"contract: %v", err)
				}

				// As this contract produced another, we'll
				// re-assign, so we can continue our resolution
				// loop.
				currentContract = nextContract

			// If this contract is actually fully resolved, then
			// we'll mark it as such within the database.
			case currentContract.IsResolved():
				log.Debugf("ChannelArbitrator(%v): marking "+
					"contract %T fully resolved",
					c.cfg.ChanPoint, currentContract)

				err := c.log.ResolveContract(currentContract)
				if err != nil {
					log.Errorf("unable to resolve contract: %v",
						err)
				}

				// Now that the contract has been resolved,
				// we'll signal to the main goroutine.
				select {
				case c.resolutionSignal <- struct{}{}:
				case <-c.quit:
					return
				}
			}

		}
	}
}

// signalUpdateMsg is a struct that carries fresh signals to the
// ChannelArbitrator. We need to receive a message like this each time the
// channel becomes active, as its internal state may change.
type signalUpdateMsg struct {
	// newSignals is the set of new active signals to be sent to the
	// arbitrator.
	newSignals *ContractSignals

	// doneChan is a channel that will be closed once the arbitrator has
	// attached the new signals.
	doneChan chan struct{}
}

// UpdateContractSignals updates the set of signals the ChannelArbitrator needs
// to receive from a channel in real-time in order to keep in sync with the
// latest state of the contract.
func (c *ChannelArbitrator) UpdateContractSignals(newSignals *ContractSignals) {
	done := make(chan struct{})

	select {
	case c.signalUpdates <- &signalUpdateMsg{
		newSignals: newSignals,
		doneChan:   done,
	}:
	case <-c.quit:
	}

	select {
	case <-done:
	case <-c.quit:
	}
}

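// Editorial sketch (not part of the upstream file): once a channel link goes
// active, a caller could push the latest signals to the arbitrator. The `arb`
// variable and the literal channel ID below are assumptions for illustration;
// lnwire.NewShortChanIDFromInt builds the ShortChannelID.
//
//	arb.UpdateContractSignals(&ContractSignals{
//		ShortChanID: lnwire.NewShortChanIDFromInt(123456),
//	})
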
// notifyContractUpdate updates the ChannelArbitrator's unmerged mappings such
// that they can later be merged with activeHTLCs when calling
// checkLocalChainActions or sweepAnchors. These are the only two places that
// activeHTLCs is used.
func (c *ChannelArbitrator) notifyContractUpdate(upd *ContractUpdate) {
	c.unmergedMtx.Lock()
	defer c.unmergedMtx.Unlock()

	// Update the mapping.
	c.unmergedSet[upd.HtlcKey] = newHtlcSet(upd.Htlcs)

	log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v",
		c.cfg.ChanPoint, lnutils.SpewLogClosure(upd))
}

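// Editorial sketch (not part of the upstream file): a contract update for the
// remote commitment could be fed to the arbitrator as below. The
// `remoteHTLCs` slice is an assumed variable holding the channel's current
// remote-commitment HTLCs.
//
//	c.notifyContractUpdate(&ContractUpdate{
//		HtlcKey: RemoteHtlcSet,
//		Htlcs:   remoteHTLCs,
//	})
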
// updateActiveHTLCs merges the unmerged set of HTLCs from the link with
// activeHTLCs.
func (c *ChannelArbitrator) updateActiveHTLCs() {
	c.unmergedMtx.RLock()
	defer c.unmergedMtx.RUnlock()

	// Update the mapping.
	c.activeHTLCs[LocalHtlcSet] = c.unmergedSet[LocalHtlcSet]
	c.activeHTLCs[RemoteHtlcSet] = c.unmergedSet[RemoteHtlcSet]

	// If the pending set exists, update that as well.
	if _, ok := c.unmergedSet[RemotePendingHtlcSet]; ok {
		pendingSet := c.unmergedSet[RemotePendingHtlcSet]
		c.activeHTLCs[RemotePendingHtlcSet] = pendingSet
	}
}

// channelAttendant is the primary goroutine that acts as the judicial
// arbitrator between our channel state, the remote channel peer, and the
// blockchain (our judge). This goroutine will ensure that we faithfully execute
// all clauses of our contract in the case that we need to go on-chain for a
// dispute. Currently, two such conditions warrant our intervention: when an
// outgoing HTLC is about to time out, and when we know the pre-image for an
// incoming HTLC, but it hasn't yet been settled off-chain. In these cases,
// we'll: broadcast our commitment, cancel/settle any HTLCs backwards after
// sufficient confirmation, and finally send our set of outputs to the UTXO
// Nursery for incubation, and ultimate sweeping.
//
// NOTE: This MUST be run as a goroutine.
//
//nolint:funlen
func (c *ChannelArbitrator) channelAttendant(bestHeight int32,
	commitSet *CommitSet) {

	// TODO(roasbeef): tell top chain arb we're done
	defer func() {
		c.wg.Done()
	}()

	err := c.progressStateMachineAfterRestart(bestHeight, commitSet)
	if err != nil {
		// In case of an error, we return early but we do not shut down
		// LND, because there might be other channels that still can be
		// resolved and we don't want to interfere with that.
		// We continue to run the channel attendant in case the channel
		// closes via other means, for example if the remote party
		// force closes the channel. So we log the error and continue.
		log.Errorf("Unable to progress state machine after "+
			"restart: %v", err)
	}

	for {
		select {

		// A new block has arrived, we'll examine all the active HTLCs
		// to see if any of them have expired, and also update our
		// track of the best current height.
		case blockHeight, ok := <-c.blocks:
			if !ok {
				return
			}
			bestHeight = blockHeight

			// If we're not in the default state, then we can
			// ignore this signal as we're waiting for contract
			// resolution.
			if c.state != StateDefault {
				continue
			}

			// Now that a new block has arrived, we'll attempt to
			// advance our state forward.
			nextState, _, err := c.advanceState(
				uint32(bestHeight), chainTrigger, nil,
			)
			if err != nil {
				log.Errorf("Unable to advance state: %v", err)
			}

			// If as a result of this trigger, the contract is
			// fully resolved, then we'll exit.
			if nextState == StateFullyResolved {
				return
			}

		// A new signal update was just sent. This indicates that the
		// channel under watch is now live, and may modify its internal
		// state, so we'll get the most up to date signals so we can
		// properly do our job.
		case signalUpdate := <-c.signalUpdates:
			log.Tracef("ChannelArbitrator(%v): got new signal "+
				"update!", c.cfg.ChanPoint)

			// We'll update the ShortChannelID.
			c.cfg.ShortChanID = signalUpdate.newSignals.ShortChanID

			// Now that the signal has been updated, we'll now
			// close the done channel to signal to the caller we've
			// registered the new ShortChannelID.
			close(signalUpdate.doneChan)

		// We've cooperatively closed the channel, so we're no longer
		// needed. We'll mark the channel as resolved and exit.
		case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
			log.Infof("ChannelArbitrator(%v) marking channel "+
				"cooperatively closed", c.cfg.ChanPoint)

			err := c.cfg.MarkChannelClosed(
				closeInfo.ChannelCloseSummary,
				channeldb.ChanStatusCoopBroadcasted,
			)
			if err != nil {
				log.Errorf("Unable to mark channel closed: "+
					"%v", err)
				return
			}

			// We'll now advance our state machine until it reaches
			// a terminal state, and the channel is marked resolved.
			_, _, err = c.advanceState(
				closeInfo.CloseHeight, coopCloseTrigger, nil,
			)
			if err != nil {
				log.Errorf("Unable to advance state: %v", err)
				return
			}

		// We have broadcasted our commitment, and it is now confirmed
		// on-chain.
		case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
			log.Infof("ChannelArbitrator(%v): local on-chain "+
				"channel close", c.cfg.ChanPoint)

			if c.state != StateCommitmentBroadcasted {
				log.Errorf("ChannelArbitrator(%v): unexpected "+
					"local on-chain channel close",
					c.cfg.ChanPoint)
			}
			closeTx := closeInfo.CloseTx

			resolutions, err := closeInfo.ContractResolutions.
				UnwrapOrErr(
					fmt.Errorf("resolutions not found"),
				)
			if err != nil {
				log.Errorf("ChannelArbitrator(%v): unable to "+
					"get resolutions: %v", c.cfg.ChanPoint,
					err)

				return
			}

			// We make sure that the htlc resolutions are present
			// otherwise we would panic dereferencing the pointer.
			//
			// TODO(ziggie): Refactor ContractResolutions to use
			// options.
			if resolutions.HtlcResolutions == nil {
				log.Errorf("ChannelArbitrator(%v): htlc "+
					"resolutions not found",
					c.cfg.ChanPoint)

				return
			}

			contractRes := &ContractResolutions{
				CommitHash:       closeTx.TxHash(),
				CommitResolution: resolutions.CommitResolution,
				HtlcResolutions:  *resolutions.HtlcResolutions,
				AnchorResolution: resolutions.AnchorResolution,
			}

			// When processing a unilateral close event, we'll
			// transition to the ContractClosed state. We'll log
			// out the set of resolutions such that they are
			// available to fetch in that state, we'll also write
			// the commit set so we can reconstruct our chain
			// actions on restart.
			err = c.log.LogContractResolutions(contractRes)
			if err != nil {
				log.Errorf("Unable to write resolutions: %v",
					err)
				return
			}
			err = c.log.InsertConfirmedCommitSet(
				&closeInfo.CommitSet,
			)
			if err != nil {
				log.Errorf("Unable to write commit set: %v",
					err)
				return
			}

			// After the set of resolutions are successfully
			// logged, we can safely close the channel. After this
			// succeeds we won't be getting chain events anymore,
			// so we must make sure we can recover on restart after
			// it is marked closed. If the next state transition
			// fails, we'll start up in the prior state again, and
			// we will no longer be getting chain events. In this
			// case we must manually re-trigger the state
			// transition into StateContractClosed based on the
			// close status of the channel.
			err = c.cfg.MarkChannelClosed(
				closeInfo.ChannelCloseSummary,
				channeldb.ChanStatusLocalCloseInitiator,
			)
			if err != nil {
				log.Errorf("Unable to mark "+
					"channel closed: %v", err)
				return
			}

			// We'll now advance our state machine until it reaches
			// a terminal state.
			_, _, err = c.advanceState(
				uint32(closeInfo.SpendingHeight),
				localCloseTrigger, &closeInfo.CommitSet,
			)
			if err != nil {
				log.Errorf("Unable to advance state: %v", err)
			}

		// The remote party has broadcast the commitment on-chain.
		// We'll examine our state to determine if we need to act at
		// all.
		case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
			log.Infof("ChannelArbitrator(%v): remote party has "+
				"closed channel out on-chain", c.cfg.ChanPoint)

			// If we don't have a self output, and there are no
			// active HTLCs, then we can immediately mark the
			// contract as fully resolved and exit.
			contractRes := &ContractResolutions{
				CommitHash:       *uniClosure.SpenderTxHash,
				CommitResolution: uniClosure.CommitResolution,
				HtlcResolutions:  *uniClosure.HtlcResolutions,
				AnchorResolution: uniClosure.AnchorResolution,
			}

			// When processing a unilateral close event, we'll
			// transition to the ContractClosed state. We'll log
			// out the set of resolutions such that they are
			// available to fetch in that state, we'll also write
			// the commit set so we can reconstruct our chain
			// actions on restart.
			err := c.log.LogContractResolutions(contractRes)
			if err != nil {
				log.Errorf("Unable to write resolutions: %v",
					err)
				return
			}
			err = c.log.InsertConfirmedCommitSet(
				&uniClosure.CommitSet,
			)
			if err != nil {
				log.Errorf("Unable to write commit set: %v",
					err)
				return
			}

			// After the set of resolutions are successfully
			// logged, we can safely close the channel. After this
			// succeeds we won't be getting chain events anymore,
			// so we must make sure we can recover on restart after
			// it is marked closed. If the next state transition
			// fails, we'll start up in the prior state again, and
			// we will no longer be getting chain events. In this
			// case we must manually re-trigger the state
			// transition into StateContractClosed based on the
			// close status of the channel.
			closeSummary := &uniClosure.ChannelCloseSummary
			err = c.cfg.MarkChannelClosed(
				closeSummary,
				channeldb.ChanStatusRemoteCloseInitiator,
			)
			if err != nil {
				log.Errorf("Unable to mark channel closed: %v",
					err)
				return
			}

			// We'll now advance our state machine until it reaches
			// a terminal state.
			_, _, err = c.advanceState(
				uint32(uniClosure.SpendingHeight),
				remoteCloseTrigger, &uniClosure.CommitSet,
			)
			if err != nil {
				log.Errorf("Unable to advance state: %v", err)
			}

		// The remote has breached the channel. As this is handled by
		// the ChainWatcher and BreachArbitrator, we don't have to do
		// anything in particular, so just advance our state and
		// gracefully exit.
		case breachInfo := <-c.cfg.ChainEvents.ContractBreach:
			log.Infof("ChannelArbitrator(%v): remote party has "+
				"breached channel!", c.cfg.ChanPoint)

			// In the breach case, we'll only have anchor and
			// breach resolutions.
			contractRes := &ContractResolutions{
				CommitHash:       breachInfo.CommitHash,
				BreachResolution: breachInfo.BreachResolution,
				AnchorResolution: breachInfo.AnchorResolution,
			}

			// We'll transition to the ContractClosed state and log
			// the set of resolutions such that they can be turned
			// into resolvers later on. We'll also insert the
			// CommitSet of the latest set of commitments.
			err := c.log.LogContractResolutions(contractRes)
			if err != nil {
				log.Errorf("Unable to write resolutions: %v",
					err)
				return
			}
			err = c.log.InsertConfirmedCommitSet(
				&breachInfo.CommitSet,
			)
			if err != nil {
				log.Errorf("Unable to write commit set: %v",
					err)
				return
			}

			// The channel is finally marked pending closed here as
			// the BreachArbitrator and channel arbitrator have
			// persisted the relevant states.
			closeSummary := &breachInfo.CloseSummary
			err = c.cfg.MarkChannelClosed(
				closeSummary,
				channeldb.ChanStatusRemoteCloseInitiator,
			)
			if err != nil {
				log.Errorf("Unable to mark channel closed: %v",
					err)
				return
			}

			log.Infof("Breached channel=%v marked pending-closed",
				breachInfo.BreachResolution.FundingOutPoint)

			// We'll advance our state machine until it reaches a
			// terminal state.
			_, _, err = c.advanceState(
				uint32(bestHeight), breachCloseTrigger,
				&breachInfo.CommitSet,
			)
			if err != nil {
				log.Errorf("Unable to advance state: %v", err)
			}

		// A new contract has just been resolved, we'll now check our
		// log to see if all contracts have been resolved. If so, then
		// we can exit as the contract is fully resolved.
		case <-c.resolutionSignal:
			log.Infof("ChannelArbitrator(%v): a contract has been "+
				"fully resolved!", c.cfg.ChanPoint)

			nextState, _, err := c.advanceState(
				uint32(bestHeight), chainTrigger, nil,
			)
			if err != nil {
				log.Errorf("Unable to advance state: %v", err)
			}

			// If we don't have anything further to do after
			// advancing our state, then we'll exit.
			if nextState == StateFullyResolved {
				log.Infof("ChannelArbitrator(%v): all "+
					"contracts fully resolved, exiting",
					c.cfg.ChanPoint)

				return
			}

		// We've just received a request to forcibly close out the
		// channel. We'll attempt to advance the state machine to
		// broadcast our commitment transaction, then respond to the
		// caller.
		case closeReq := <-c.forceCloseReqs:
			log.Infof("ChannelArbitrator(%v): received force "+
				"close request", c.cfg.ChanPoint)

			if c.state != StateDefault {
				select {
				case closeReq.closeTx <- nil:
				case <-c.quit:
				}

				select {
				case closeReq.errResp <- errAlreadyForceClosed:
				case <-c.quit:
				}

				continue
			}

			nextState, closeTx, err := c.advanceState(
				uint32(bestHeight), userTrigger, nil,
			)
			if err != nil {
				log.Errorf("Unable to advance state: %v", err)
			}

			select {
			case closeReq.closeTx <- closeTx:
			case <-c.quit:
				return
			}

			select {
			case closeReq.errResp <- err:
			case <-c.quit:
				return
			}

			// If we don't have anything further to do after
			// advancing our state, then we'll exit.
			if nextState == StateFullyResolved {
				log.Infof("ChannelArbitrator(%v): all "+
					"contracts resolved, exiting",
					c.cfg.ChanPoint)
				return
			}

		case <-c.quit:
			return
		}
	}
}

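// Editorial sketch (not part of the upstream file): as the NOTE above states,
// channelAttendant must run in its own goroutine, with the WaitGroup counter
// bumped beforehand since the method calls c.wg.Done() on exit. Roughly:
//
//	c.wg.Add(1)
//	go c.channelAttendant(bestHeight, commitSet)
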
// checkLegacyBreach returns StateFullyResolved if the channel was closed with
// a breach transaction before the channel arbitrator launched its own breach
// resolver. StateContractClosed is returned if this is a modern breach close
// with a breach resolver. StateError is returned if the log lookup failed.
func (c *ChannelArbitrator) checkLegacyBreach() (ArbitratorState, error) {
	// A previous version of the channel arbitrator would make the breach
	// close skip to StateFullyResolved. If there are no contract
	// resolutions in the bolt arbitrator log, then this is an older breach
	// close. Otherwise, if there are resolutions, the state should advance
	// to StateContractClosed.
	_, err := c.log.FetchContractResolutions()
	if err == errNoResolutions {
		// This is an older breach close still in the database.
		return StateFullyResolved, nil
	} else if err != nil {
		return StateError, err
	}

	// This is a modern breach close with resolvers.
	return StateContractClosed, nil
}

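// Editorial sketch (not part of the upstream file): during breach handling a
// caller could branch on the result of checkLegacyBreach, e.g.:
//
//	nextState, err := c.checkLegacyBreach()
//	if err != nil {
//		return err
//	}
//	if nextState == StateFullyResolved {
//		// Legacy breach close: nothing further to resolve here.
//	}
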
// sweepRequest wraps the arguments used when calling `SweepInput`.
type sweepRequest struct {
	// input is the input to be swept.
	input input.Input

	// params holds the sweeping parameters.
	params sweep.Params
}

// createSweepRequest creates an anchor sweeping request for a particular
// version (local/remote/remote pending) of the commitment.
func (c *ChannelArbitrator) createSweepRequest(
	anchor *lnwallet.AnchorResolution, htlcs htlcSet, anchorPath string,
	heightHint uint32) (sweepRequest, error) {

	// Use the chan id as the exclusive group. This prevents any of the
	// anchors from being batched together.
	exclusiveGroup := c.cfg.ShortChanID.ToUint64()

	// Find the deadline for this specific anchor.
	deadline, value, err := c.findCommitmentDeadlineAndValue(
		heightHint, htlcs,
	)
	if err != nil {
		return sweepRequest{}, err
	}

	// If we cannot find a deadline, it means there are no HTLCs at stake,
	// which means we can relax our anchor sweeping conditions as we don't
	// have any time sensitive outputs to sweep. However we need to
	// register the anchor output with the sweeper so we are later able to
	// bump the close fee.
	if deadline.IsNone() {
		log.Infof("ChannelArbitrator(%v): no HTLCs at stake, "+
			"sweeping anchor with default deadline",
			c.cfg.ChanPoint)
	}

	witnessType := input.CommitmentAnchor

	// For taproot channels, we need to use the proper witness type.
	if txscript.IsPayToTaproot(
		anchor.AnchorSignDescriptor.Output.PkScript,
	) {

		witnessType = input.TaprootAnchorSweepSpend
	}

	// Prepare anchor output for sweeping.
	anchorInput := input.MakeBaseInput(
		&anchor.CommitAnchor,
		witnessType,
		&anchor.AnchorSignDescriptor,
		heightHint,
		&input.TxInfo{
			Fee:    anchor.CommitFee,
			Weight: anchor.CommitWeight,
		},
	)

	// If we have a deadline, we'll use it to calculate the deadline
	// height, otherwise default to none.
	deadlineDesc := "None"
	deadlineHeight := fn.MapOption(func(d int32) int32 {
		deadlineDesc = fmt.Sprintf("%d", d)

		return d + int32(heightHint)
	})(deadline)

	// Calculate the budget based on the value under protection, which is
	// the sum of all HTLCs on this commitment scaled by the configured
	// CPFP budget ratio. The anchor output itself has a small value of
	// 330 sats, so we also include it in the budget to pay for the CPFP
	// transaction.
	budget := calculateBudget(
		value, c.cfg.Budget.AnchorCPFPRatio, c.cfg.Budget.AnchorCPFP,
	) + AnchorOutputValue

	log.Infof("ChannelArbitrator(%v): offering anchor from %s commitment "+
		"%v to sweeper with deadline=%v, budget=%v", c.cfg.ChanPoint,
		anchorPath, anchor.CommitAnchor, deadlineDesc, budget)

	// Sweep anchor output with a confirmation target fee preference.
	// Because this is a CPFP operation, the anchor sweep will only be
	// attempted when the current fee estimate for the confirmation target
	// exceeds the commit fee rate.
	return sweepRequest{
		input: &anchorInput,
		params: sweep.Params{
			ExclusiveGroup: &exclusiveGroup,
			Budget:         budget,
			DeadlineHeight: deadlineHeight,
		},
	}, nil
}

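// Editorial example with made-up numbers (not from the codebase): assuming
// calculateBudget scales the protected value by AnchorCPFPRatio and no
// absolute AnchorCPFP override is configured, 1,000,000 sats of HTLC value
// under protection with a ratio of 0.5 yields a budget of 500,000 sats, so
// the value offered to the sweeper would be 500,000 + 330 = 500,330 sats once
// AnchorOutputValue is added.
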
// prepareAnchorSweeps creates a list of requests to be used by the sweeper for
// all possible commitment versions.
func (c *ChannelArbitrator) prepareAnchorSweeps(heightHint uint32,
	anchors *lnwallet.AnchorResolutions) ([]sweepRequest, error) {

	// requests holds all the possible anchor sweep requests. We can have
	// up to 3 different versions of commitments (local/remote/remote
	// dangling) to be CPFPed by the anchors.
	requests := make([]sweepRequest, 0, 3)

	// remotePendingReq holds the request for sweeping the anchor output on
	// the remote pending commitment. It's only set when there's an actual
	// pending remote commitment and it's used to decide whether we need to
	// update the fee budget when sweeping the anchor output on the local
	// commitment.
	remotePendingReq := fn.None[sweepRequest]()

	// First we check on the remote pending commitment and optionally
	// create an anchor sweeping request.
	htlcs, ok := c.activeHTLCs[RemotePendingHtlcSet]
	if ok && anchors.RemotePending != nil {
		req, err := c.createSweepRequest(
			anchors.RemotePending, htlcs, "remote pending",
			heightHint,
		)
		if err != nil {
			return nil, err
		}

		// Save the request.
		requests = append(requests, req)

		// Set the optional variable.
		remotePendingReq = fn.Some(req)
	}

	// Check the local commitment and optionally create an anchor sweeping
	// request. The params used in this request will be influenced by the
	// anchor sweeping request made from the pending remote commitment.
	htlcs, ok = c.activeHTLCs[LocalHtlcSet]
	if ok && anchors.Local != nil {
		req, err := c.createSweepRequest(
			anchors.Local, htlcs, "local", heightHint,
		)
		if err != nil {
			return nil, err
		}

		// If there's an anchor sweeping request from the pending
		// remote commitment, we will compare its budget against the
		// budget used here and choose the params that have the larger
		// budget. The deadline when choosing the remote pending budget
		// instead of the local one will always be earlier or equal to
		// the local deadline because outgoing HTLCs are resolved on
		// the local commitment first before they are removed from the
		// remote one.
		remotePendingReq.WhenSome(func(s sweepRequest) {
			if s.params.Budget <= req.params.Budget {
				return
			}

			log.Infof("ChannelArbitrator(%v): replaced local "+
				"anchor(%v) sweep params with pending remote "+
				"anchor sweep params, \nold:[%v], \nnew:[%v]",
				c.cfg.ChanPoint, anchors.Local.CommitAnchor,
				req.params, s.params)

			req.params = s.params
		})

		// Save the request.
		requests = append(requests, req)
	}

	// Check the remote commitment and create an anchor sweeping request if
	// needed.
	htlcs, ok = c.activeHTLCs[RemoteHtlcSet]
	if ok && anchors.Remote != nil {
		req, err := c.createSweepRequest(
			anchors.Remote, htlcs, "remote", heightHint,
		)
		if err != nil {
			return nil, err
		}

		requests = append(requests, req)
	}

	return requests, nil
}

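// Editorial sketch (not part of the upstream file): the requests built above
// are meant for the sweeper's SweepInput call (see sweepRequest). The exact
// config field that exposes the sweeper is an assumption here:
//
//	for _, req := range requests {
//		if _, err := c.cfg.Sweeper.SweepInput(req.input, req.params); err != nil {
//			return err
//		}
//	}
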
// failIncomingDust resolves the incoming dust HTLCs because they do not have
// an output on the commitment transaction and cannot be resolved onchain. We
// mark them as failed here.
func (c *ChannelArbitrator) failIncomingDust(
	incomingDustHTLCs []channeldb.HTLC) error {

	for _, htlc := range incomingDustHTLCs {
		if !htlc.Incoming || htlc.OutputIndex >= 0 {
			return fmt.Errorf("htlc with index %v is not incoming "+
				"dust", htlc.OutputIndex)
		}

		key := models.CircuitKey{
			ChanID: c.cfg.ShortChanID,
			HtlcID: htlc.HtlcIndex,
		}

		// Mark this dust htlc as final failed.
		chainArbCfg := c.cfg.ChainArbitratorConfig
		err := chainArbCfg.PutFinalHtlcOutcome(
			key.ChanID, key.HtlcID, false,
		)
		if err != nil {
			return err
		}

		// Send notification.
		chainArbCfg.HtlcNotifier.NotifyFinalHtlcEvent(
			key,
			channeldb.FinalHtlcInfo{
				Settled:  false,
				Offchain: false,
			},
		)
	}

	return nil
}

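// Editorial sketch (not part of the upstream file): an incoming HTLC counts as
// dust here when it has no commitment output, which failIncomingDust detects
// via a negative OutputIndex. A caller could collect such HTLCs like this,
// with `htlcs` being an assumed []channeldb.HTLC:
//
//	dust := make([]channeldb.HTLC, 0, len(htlcs))
//	for _, h := range htlcs {
//		if h.Incoming && h.OutputIndex < 0 {
//			dust = append(dust, h)
//		}
//	}
//	if err := c.failIncomingDust(dust); err != nil {
//		return err
//	}
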
// abandonForwards cancels back the incoming HTLCs for their corresponding
// outgoing HTLCs. We use a set here to avoid sending duplicate failure
// messages for the same HTLC. This also needs to be done for locally
// initiated outgoing HTLCs because they are special cased in the switch.
func (c *ChannelArbitrator) abandonForwards(htlcs fn.Set[uint64]) error {
	log.Debugf("ChannelArbitrator(%v): cancelling back %v incoming "+
		"HTLC(s)", c.cfg.ChanPoint,
		len(htlcs))

	msgsToSend := make([]ResolutionMsg, 0, len(htlcs))
	failureMsg := &lnwire.FailPermanentChannelFailure{}

	for idx := range htlcs {
		failMsg := ResolutionMsg{
			SourceChan: c.cfg.ShortChanID,
			HtlcIndex:  idx,
			Failure:    failureMsg,
		}

		msgsToSend = append(msgsToSend, failMsg)
	}

	// Send the messages to the switch, if there are any.
	if len(msgsToSend) == 0 {
		return nil
	}

	log.Debugf("ChannelArbitrator(%v): sending resolution message=%v",
		c.cfg.ChanPoint, lnutils.SpewLogClosure(msgsToSend))

	err := c.cfg.DeliverResolutionMsg(msgsToSend...)
	if err != nil {
		log.Errorf("Unable to send resolution msgs to switch: %v", err)
		return err
	}

	return nil
}

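// Editorial sketch (not part of the upstream file): the HTLC indices to
// abandon are passed as an fn.Set, which de-duplicates them. Assuming fn.NewSet
// and an outgoing HTLC `htlc`, a caller could do:
//
//	if err := c.abandonForwards(fn.NewSet(htlc.HtlcIndex)); err != nil {
//		return err
//	}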