
lightningnetwork / lnd, build 12266849117 (push · github · web-flow)
10 Dec 2024 11:57PM UTC coverage: 49.54% (-0.3%) from 49.808%

Merge pull request #8512 from lightningnetwork/rbf-coop-fsm
[3/4] - lnwallet/chancloser: add new protofsm based RBF chan closer

26 of 1082 new or added lines in 10 files covered. (2.4%)
68 existing lines in 8 files now uncovered.
100376 of 202617 relevant lines covered (49.54%)
2.06 hits per line

Source File: /lnwallet/chancloser/rbf_coop_transitions.go (coverage: 0.0%)
package chancloser

import (
        "fmt"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/chaincfg"
        "github.com/btcsuite/btcd/mempool"
        "github.com/btcsuite/btcd/wire"
        "github.com/davecgh/go-spew/spew"
        "github.com/lightningnetwork/lnd/fn/v2"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/labels"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/lnutils"
        "github.com/lightningnetwork/lnd/lnwallet"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/protofsm"
        "github.com/lightningnetwork/lnd/tlv"
)

// sendShutdownEvents is a helper function that returns a set of daemon events
// we need to emit when we decide that we should send a shutdown message. We'll
// also mark the channel as borked as well, as at this point, we no longer want
// to continue with normal operation.
func sendShutdownEvents(chanID lnwire.ChannelID, chanPoint wire.OutPoint,
        deliveryAddr lnwire.DeliveryAddress, peerPub btcec.PublicKey,
        postSendEvent fn.Option[ProtocolEvent],
        chanState ChanStateObserver) (protofsm.DaemonEventSet, error) {

        // We'll emit a daemon event that instructs the daemon to send out a
        // new shutdown message to the remote peer.
        msgsToSend := &protofsm.SendMsgEvent[ProtocolEvent]{
                TargetPeer: peerPub,
                Msgs: []lnwire.Message{&lnwire.Shutdown{
                        ChannelID: chanID,
                        Address:   deliveryAddr,
                }},
                SendWhen: fn.Some(func() bool {
                        ok := chanState.NoDanglingUpdates()
                        if ok {
                                chancloserLog.Infof("ChannelPoint(%v): no "+
                                        "dangling updates sending shutdown "+
                                        "message", chanPoint)
                        }

                        return ok
                }),
                PostSendEvent: postSendEvent,
        }

        // If a close is already in process (we're in the RBF loop), then we
        // can skip everything below, and just send out the shutdown message.
        if chanState.FinalBalances().IsSome() {
                return protofsm.DaemonEventSet{msgsToSend}, nil
        }

        // Before closing, we'll attempt to send a disable update for the
        // channel.  We do so before closing the channel as otherwise the
        // current edge policy won't be retrievable from the graph.
        if err := chanState.DisableChannel(); err != nil {
                return nil, fmt.Errorf("unable to disable channel: %w", err)
        }

        // If we have a post-send event, then this means that we're the
        // responder. We'll use this fact below to update state in the DB.
        isInitiator := postSendEvent.IsNone()

        chancloserLog.Infof("ChannelPoint(%v): disabling outgoing adds",
                chanPoint)

        // As we're about to send a shutdown, we'll disable adds in the
        // outgoing direction.
        if err := chanState.DisableOutgoingAdds(); err != nil {
                return nil, fmt.Errorf("unable to disable outgoing "+
                        "adds: %w", err)
        }

        // To be able to survive a restart, we'll also write to disk
        // information about the shutdown we're about to send out.
        err := chanState.MarkShutdownSent(deliveryAddr, isInitiator)
        if err != nil {
                return nil, fmt.Errorf("unable to mark shutdown sent: %w", err)
        }

        chancloserLog.Debugf("ChannelPoint(%v): marking channel as borked",
                chanPoint)

        return protofsm.DaemonEventSet{msgsToSend}, nil
}
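
// The SendWhen closure above defers delivery of the Shutdown message until
// the channel reports no dangling updates. The following is a minimal,
// self-contained sketch of that gating pattern (hypothetical names,
// simplified from the protofsm usage above, not part of lnd's API):
func exampleSendWhenReady(ready func() bool, send func()) bool {
        // The daemon would poll this predicate; the message is only handed
        // to the peer once the predicate reports true.
        if !ready() {
                return false
        }

        send()

        return true
}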

// validateShutdown is a helper function that validates that the shutdown has a
// proper delivery script, and can be sent based on the current thaw height of
// the channel.
func validateShutdown(chanThawHeight fn.Option[uint32],
        upfrontAddr fn.Option[lnwire.DeliveryAddress],
        msg *ShutdownReceived, chanPoint wire.OutPoint,
        chainParams chaincfg.Params) error {

        // If we've received a shutdown message, and we have a thaw height,
        // then we need to make sure that the channel can now be co-op closed.
        err := fn.MapOptionZ(chanThawHeight, func(thawHeight uint32) error {
                // If the current height is below the thaw height, then we'll
                // reject the shutdown message as we can't yet co-op close the
                // channel.
                if msg.BlockHeight < thawHeight {
                        return fmt.Errorf("initiator attempting to "+
                                "co-op close frozen ChannelPoint(%v) "+
                                "(current_height=%v, thaw_height=%v)",
                                chanPoint, msg.BlockHeight,
                                thawHeight)
                }

                return nil
        })
        if err != nil {
                return err
        }

        // Next, we'll verify that the remote party is sending the expected
        // shutdown script.
        return fn.MapOption(func(addr lnwire.DeliveryAddress) error {
                return validateShutdownScript(
                        addr, msg.ShutdownScript, &chainParams,
                )
        })(upfrontAddr).UnwrapOr(nil)
}
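
// As an illustration of the thaw-height rule enforced above, here is a
// minimal, self-contained sketch (hypothetical helper, simplified from
// validateShutdown): a frozen channel can only be co-op closed once the
// current block height reaches the thaw height.
func exampleThawHeightOK(currentHeight, thawHeight uint32) bool {
        // The shutdown is acceptable only once the chain has advanced to
        // (or past) the thaw height.
        return currentHeight >= thawHeight
}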

// ProcessEvent takes a protocol event, and implements a state transition for
// the state. From this state, we can receive two possible incoming events:
// SendShutdown and ShutdownReceived. Both of these will transition us to the
// ChannelFlushing state.
func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) {
        // If we get a confirmation, then a prior transaction we broadcasted
        // has confirmed, so we can move to our terminal state early.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        // If we receive the SendShutdown event, then we'll send our shutdown
        // with a special SendPredicate, then go to the ShutdownPending where
        // we'll wait for the remote to send their shutdown.
        case *SendShutdown:
                // If we have an upfront shutdown addr or a delivery addr then
                // we'll use that. Otherwise, we'll generate a new delivery
                // addr.
                shutdownScript, err := env.LocalUpfrontShutdown.Alt(
                        msg.DeliveryAddr,
                ).UnwrapOrFuncErr(env.NewDeliveryScript)
                if err != nil {
                        return nil, err
                }

                // We'll emit some daemon events to send the shutdown message
                // and disable the channel on the network level. In this case,
                // we don't need a post send event, as receiving their shutdown
                // is what'll move us beyond the ShutdownPending state.
                daemonEvents, err := sendShutdownEvents(
                        env.ChanID, env.ChanPoint, shutdownScript,
                        env.ChanPeer, fn.None[ProtocolEvent](),
                        env.ChanObserver,
                )
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg, "+
                        "delivery_script=%v", env.ChanPoint, shutdownScript)

                // From here, we'll transition to the shutdown pending state. In
                // this state we await their shutdown message (self loop), then
                // also the flushing event.
                return &CloseStateTransition{
                        NextState: &ShutdownPending{
                                IdealFeeRate: fn.Some(msg.IdealFeeRate),
                                ShutdownScripts: ShutdownScripts{
                                        LocalDeliveryScript: shutdownScript,
                                },
                        },
                        NewEvents: fn.Some(RbfEvent{
                                ExternalEvents: daemonEvents,
                        }),
                }, nil

        // When we receive a shutdown from the remote party, we'll validate the
        // shutdown message, then transition to the ShutdownPending state. We'll
        // also emit similar events like the above to send out shutdown, and
        // also disable the channel.
        case *ShutdownReceived:
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
                        env.ChanPoint)

                // Validate that they can send the message now, and also that
                // they haven't violated their commitment to a prior upfront
                // shutdown addr.
                err := validateShutdown(
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
                        env.ChanPoint, env.ChainParams,
                )
                if err != nil {
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
                                "shutdown attempt: %v", env.ChanPoint, err)

                        return nil, err
                }

                // If we have an upfront shutdown addr we'll use that,
                // otherwise, we'll generate a new delivery script.
                shutdownAddr, err := env.LocalUpfrontShutdown.UnwrapOrFuncErr(
                        env.NewDeliveryScript,
                )
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg "+
                        "at next clean commit state", env.ChanPoint)

                // Now that we know the shutdown message is valid, we'll obtain
                // the set of daemon events we need to emit. We'll also specify
                // that once the message has actually been sent, we generate an
                // input event of ShutdownComplete.
                daemonEvents, err := sendShutdownEvents(
                        env.ChanID, env.ChanPoint, shutdownAddr,
                        env.ChanPeer,
                        fn.Some[ProtocolEvent](&ShutdownComplete{}),
                        env.ChanObserver,
                )
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
                        env.ChanPoint)

                // We just received a shutdown, so we'll disable the adds in
                // the incoming direction.
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
                        return nil, fmt.Errorf("unable to disable incoming "+
                                "adds: %w", err)
                }

                remoteAddr := msg.ShutdownScript

                return &CloseStateTransition{
                        NextState: &ShutdownPending{
                                ShutdownScripts: ShutdownScripts{
                                        LocalDeliveryScript:  shutdownAddr,
                                        RemoteDeliveryScript: remoteAddr,
                                },
                        },
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
                                ExternalEvents: daemonEvents,
                        }),
                }, nil

        // Any other messages in this state will result in an error, as this is
        // an undefined state transition.
        default:
                return nil, fmt.Errorf("%w: received %T while in ChannelActive",
                        ErrInvalidStateTransition, msg)
        }
}

// ProcessEvent takes a protocol event, and implements a state transition for
// the state. Our path to this state will determine the set of valid events. If
// we were the one that sent the shutdown, then we'll just wait on the
// ShutdownReceived event. Otherwise, we received the shutdown, and can move
// forward once we receive the ShutdownComplete event. Receiving
// ShutdownComplete means that we've sent our shutdown, as this was specified
// as a post send event.
func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) {
        // If we get a confirmation, then a prior transaction we broadcasted
        // has confirmed, so we can move to our terminal state early.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        // When we receive a shutdown from the remote party, we'll validate the
        // shutdown message, then transition to the ChannelFlushing state.
        case *ShutdownReceived:
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
                        env.ChanPoint)

                // Validate that they can send the message now, and also that
                // they haven't violated their commitment to a prior upfront
                // shutdown addr.
                err := validateShutdown(
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
                        env.ChanPoint, env.ChainParams,
                )
                if err != nil {
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
                                "shutdown attempt: %v", env.ChanPoint, err)

                        return nil, err
                }

                // If the channel is *already* flushed, and the close is
                // already in progress, then we can skip the flushing state and
                // go straight into negotiation, as this is the RBF loop.
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
                        unknownBalance,
                )
                if finalBalances != unknownBalance {
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
                                ShutdownBalances: finalBalances,
                        })
                        eventsToEmit = fn.Some(RbfEvent{
                                InternalEvent: []ProtocolEvent{
                                        channelFlushed,
                                },
                        })
                }

                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
                        env.ChanPoint)

                // We just received a shutdown, so we'll disable the adds in
                // the incoming direction.
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
                        return nil, fmt.Errorf("unable to disable incoming "+
                                "adds: %w", err)
                }

                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
                        "be flushed...", env.ChanPoint)

                // We transition to the ChannelFlushing state, where we await
                // the ChannelFlushed event.
                return &CloseStateTransition{
                        NextState: &ChannelFlushing{
                                IdealFeeRate: s.IdealFeeRate,
                                ShutdownScripts: ShutdownScripts{
                                        LocalDeliveryScript:  s.LocalDeliveryScript, //nolint:ll
                                        RemoteDeliveryScript: msg.ShutdownScript,    //nolint:ll
                                },
                        },
                        NewEvents: eventsToEmit,
                }, nil

        // If we get this message, then this means that we were finally able to
        // send out shutdown after receiving it from the remote party. We'll
        // now transition directly to the ChannelFlushing state.
        case *ShutdownComplete:
                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
                        "be flushed...", env.ChanPoint)

                // If the channel is *already* flushed, and the close is
                // already in progress, then we can skip the flushing state and
                // go straight into negotiation, as this is the RBF loop.
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
                        unknownBalance,
                )
                if finalBalances != unknownBalance {
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
                                ShutdownBalances: finalBalances,
                        })
                        eventsToEmit = fn.Some(RbfEvent{
                                InternalEvent: []ProtocolEvent{
                                        channelFlushed,
                                },
                        })
                }

                // From here, we'll transition to the channel flushing state.
                // We'll stay here until we receive the ChannelFlushed event.
                return &CloseStateTransition{
                        NextState: &ChannelFlushing{
                                IdealFeeRate:    s.IdealFeeRate,
                                ShutdownScripts: s.ShutdownScripts,
                        },
                        NewEvents: eventsToEmit,
                }, nil

        // Any other messages in this state will result in an error, as this is
        // an undefined state transition.
        default:
                return nil, fmt.Errorf("%w: received %T while in "+
                        "ShutdownPending", ErrInvalidStateTransition, msg)
        }
}

// ProcessEvent takes a new protocol event, and figures out if we can
// transition to the next state, or just loop back upon ourself. If we receive
// a ShutdownReceived event, then we'll stay in the ChannelFlushing state, as
// we haven't yet fully cleared the channel. Otherwise, we can move to the
// CloseReady state which'll begin the channel closing process.
func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) {
        // If we get a confirmation, then a prior transaction we broadcasted
        // has confirmed, so we can move to our terminal state early.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        // If we get an OfferReceived event, then the channel is flushed from
        // the PoV of the remote party. However, due to propagation delay or
        // concurrency, we may not have received the ChannelFlushed event yet.
        // In this case, we'll stash the event and wait for the ChannelFlushed
        // event.
        case *OfferReceivedEvent:
                chancloserLog.Infof("ChannelPoint(%v): received remote offer "+
                        "early, stashing...", env.ChanPoint)

                c.EarlyRemoteOffer = fn.Some(*msg)

                // TODO(roasbeef): unit test!
                //  * actually do this ^

                // We'll perform a noop update so we can wait for the actual
                // channel flushed event.
                return &CloseStateTransition{
                        NextState: c,
                }, nil

        // If we receive the ChannelFlushed event, then the coast is clear so
        // we'll now morph into the dual peer state so we can handle any
        // messages needed to drive forward the close process.
        case *ChannelFlushed:
                // Both the local and remote closing negotiation states need
                // the terms we'll be using to close the channel, so we'll
                // create them here.
                closeTerms := CloseChannelTerms{
                        ShutdownScripts:  c.ShutdownScripts,
                        ShutdownBalances: msg.ShutdownBalances,
                }

                chancloserLog.Infof("ChannelPoint(%v): channel flushed! "+
                        "proceeding with co-op close", env.ChanPoint)

                // Now that the channel has been flushed, we'll mark on disk
                // that we're approaching the point of no return where we'll
                // send a new signature to the remote party.
                //
                // TODO(roasbeef): doesn't actually matter if initiator here?
                if msg.FreshFlush {
                        err := env.ChanObserver.MarkCoopBroadcasted(nil, true)
                        if err != nil {
                                return nil, err
                        }
                }

                // If an ideal fee rate was specified, then we'll use that,
                // otherwise we'll fall back to the default value given in the
                // env.
                idealFeeRate := c.IdealFeeRate.UnwrapOr(env.DefaultFeeRate)

                // We'll then use that fee rate to determine the absolute fee
                // we'd propose.
                //
                // TODO(roasbeef): need to sign the 3 diff versions of this?
                localTxOut, remoteTxOut := closeTerms.DeriveCloseTxOuts()
                absoluteFee := env.FeeEstimator.EstimateFee(
                        env.ChanType, localTxOut, remoteTxOut,
                        idealFeeRate.FeePerKWeight(),
                )

                chancloserLog.Infof("ChannelPoint(%v): using ideal_fee=%v, "+
                        "absolute_fee=%v", env.ChanPoint, idealFeeRate,
                        absoluteFee)

                var (
                        internalEvents []ProtocolEvent
                        newEvents      fn.Option[RbfEvent]
                )

                // If we received a remote offer early from the remote party,
                // then we'll add that to the set of internal events to emit.
                c.EarlyRemoteOffer.WhenSome(func(offer OfferReceivedEvent) {
                        internalEvents = append(internalEvents, &offer)
                })

                // Only if we have enough funds to pay for the fees do we need
                // to emit a localOfferSign event.
                //
                // TODO(roasbeef): also only proceed if was higher than fee in
                // last round?
                if closeTerms.LocalCanPayFees(absoluteFee) {
                        // Each time we go into this negotiation flow, we'll
                        // kick off our local state with a new close attempt.
                        // So we'll emit an internal event to drive forward
                        // that part of the state.
                        localOfferSign := ProtocolEvent(&SendOfferEvent{
                                TargetFeeRate: idealFeeRate,
                        })
                        internalEvents = append(internalEvents, localOfferSign)
                } else {
                        chancloserLog.Infof("ChannelPoint(%v): unable to pay "+
                                "fees with local balance, skipping "+
                                "closing_complete", env.ChanPoint)
                }

                if len(internalEvents) > 0 {
                        newEvents = fn.Some(RbfEvent{
                                InternalEvent: internalEvents,
                        })
                }

                return &CloseStateTransition{
                        NextState: &ClosingNegotiation{
                                PeerState: lntypes.Dual[AsymmetricPeerState]{
                                        Local: &LocalCloseStart{
                                                CloseChannelTerms: closeTerms,
                                        },
                                        Remote: &RemoteCloseStart{
                                                CloseChannelTerms: closeTerms,
                                        },
                                },
                        },
                        NewEvents: newEvents,
                }, nil

        default:
                return nil, fmt.Errorf("%w: received %T while in "+
                        "ChannelFlushing", ErrInvalidStateTransition, msg)
        }
}

// processNegotiateEvent is a helper function that applies a new event to the
// channel state of the given party once we're in the ClosingNegotiation
// state.
func processNegotiateEvent(c *ClosingNegotiation, event ProtocolEvent,
        env *Environment, chanPeer lntypes.ChannelParty,
) (*CloseStateTransition, error) {

        targetPeerState := c.PeerState.GetForParty(chanPeer)

        // Drive forward the target peer's state based on the next event.
        transition, err := targetPeerState.ProcessEvent(
                event, env,
        )
        if err != nil {
                return nil, err
        }

        nextPeerState, ok := transition.NextState.(AsymmetricPeerState) //nolint:ll
        if !ok {
                return nil, fmt.Errorf("expected %T to be "+
                        "AsymmetricPeerState", transition.NextState)
        }

        // Make a copy of the input state, then update the peer state of the
        // proper party.
        newPeerState := *c
        newPeerState.PeerState.SetForParty(chanPeer, nextPeerState)

        return &CloseStateTransition{
                NextState: &newPeerState,
                NewEvents: transition.NewEvents,
        }, nil
}

// ProcessEvent drives forward the composite states for the local and remote
// party in response to new events. From this state, we'll continue to drive
// forward the local and remote states until we arrive at the StateFin stage,
// or we loop back up to the ShutdownPending state.
func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        // There're two classes of events that can break us out of this state:
        // we receive a confirmation event, or we receive a signal to restart
        // the co-op close process.
        switch msg := event.(type) {
        // If we get a confirmation, then the spend request we issued when we
        // were leaving the ChannelFlushing state has been confirmed.  We'll
        // now transition to the StateFin state.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        // Otherwise, if we receive a shutdown, or receive an event to send a
        // shutdown, then we'll go back up to the ChannelActive state, and have
        // it handle this event by emitting an internal event.
        //
        // TODO(roasbeef): both will have fee rate specified, so ok?
        case *ShutdownReceived, *SendShutdown:
                chancloserLog.Infof("ChannelPoint(%v): RBF case triggered, "+
                        "restarting negotiation", env.ChanPoint)

                return &CloseStateTransition{
                        NextState: &ChannelActive{},
                        NewEvents: fn.Some(RbfEvent{
                                InternalEvent: []ProtocolEvent{event},
                        }),
                }, nil
        }

        // If we get to this point, then we have an event that'll drive forward
        // the negotiation process.  Based on the event, we'll figure out which
        // state we'll be modifying.
        switch {
        case c.PeerState.GetForParty(lntypes.Local).ShouldRouteTo(event):
                chancloserLog.Infof("ChannelPoint(%v): routing %T to local "+
                        "chan state", env.ChanPoint, event)

                // Drive forward the local state based on the next event.
                return processNegotiateEvent(c, event, env, lntypes.Local)

        case c.PeerState.GetForParty(lntypes.Remote).ShouldRouteTo(event):
                chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+
                        "chan state", env.ChanPoint, event)

                // Drive forward the remote state based on the next event.
                return processNegotiateEvent(c, event, env, lntypes.Remote)
        }

        return nil, fmt.Errorf("%w: received %T while in ClosingNegotiation",
                ErrInvalidStateTransition, event)
}
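
// To make the RBF restart above concrete, here is a minimal, self-contained
// sketch (hypothetical types, not the real protofsm API): receiving another
// shutdown, or a request to send one, while negotiating simply re-enters the
// active state, which re-runs the shutdown/flush/negotiate cycle at the new
// fee rate, while a confirmed spend is terminal from any state.
type exampleState int

const (
        exampleActive exampleState = iota
        exampleNegotiating
        exampleDone
)

func exampleNextState(cur exampleState, event string) exampleState {
        switch {
        case event == "spend_confirmed":
                // A confirmed spend always ends the state machine.
                return exampleDone

        case cur == exampleNegotiating && event == "shutdown":
                // The RBF restart: loop back to the start of the close flow.
                return exampleActive

        default:
                return cur
        }
}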

// newSigTlv is a helper function that returns a new optional TLV sig field for
// the parametrized tlv.TlvType value.
func newSigTlv[T tlv.TlvType](s lnwire.Sig) tlv.OptionalRecordT[T, lnwire.Sig] {
        return tlv.SomeRecordT(tlv.NewRecordT[T](s))
}

// ProcessEvent implements the event processing to kick off the process of
// obtaining a new (possibly RBF'd) signature for our version of the co-op
// close transaction.
func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) { //nolint:gocritic
        // If we receive a SendOfferEvent, then we'll use the specified fee
        // rate to generate a new signature for the closing transaction.
        case *SendOfferEvent:
                // First, we'll figure out the absolute fee rate we should pay
                // given the state of the local/remote outputs.
                localTxOut, remoteTxOut := l.DeriveCloseTxOuts()
                absoluteFee := env.FeeEstimator.EstimateFee(
                        env.ChanType, localTxOut, remoteTxOut,
                        msg.TargetFeeRate.FeePerKWeight(),
                )

                // Now that we know what fee we want to pay, we'll create a new
                // signature over our co-op close transaction. For our
                // proposals, we'll just always use the known RBF sequence
                // value.
                localScript := l.LocalDeliveryScript
                rawSig, closeTx, closeBalance, err := env.CloseSigner.CreateCloseProposal( //nolint:ll
                        absoluteFee, localScript, l.RemoteDeliveryScript,
                        lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
                        lnwallet.WithCustomPayer(lntypes.Local),
                )
                if err != nil {
                        return nil, err
                }
                wireSig, err := lnwire.NewSigFromSignature(rawSig)
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("closing w/ local_addr=%x, "+
                        "remote_addr=%x, fee=%v", localScript[:],
                        l.RemoteDeliveryScript[:], absoluteFee)

                chancloserLog.Infof("proposing closing_tx=%v",
                        spew.Sdump(closeTx))

                // Now that we have our signature, we'll set the proper
                // closingSigs field based on if the remote party's output is
                // dust or not.
                var closingSigs lnwire.ClosingSigs
                switch {
                // If the remote party's output is dust, then we'll set the
                // CloserNoClosee field.
                case remoteTxOut == nil:
                        closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
                                wireSig,
                        )

                // If after paying for fees, our balance is below dust, then
                // we'll set the NoCloserClosee field.
                case closeBalance < lnwallet.DustLimitForSize(len(localScript)):
                        closingSigs.NoCloserClosee = newSigTlv[tlv.TlvType2](
                                wireSig,
                        )

                // Otherwise, we'll set the CloserAndClosee field.
                //
                // TODO(roasbeef): should actually set both??
                default:
                        closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
                                wireSig,
                        )
                }

                // Now that we have our sig, we'll emit a daemon event to send
                // it to the remote party, then transition to the
                // LocalOfferSent state.
                //
                // TODO(roasbeef): type alias for protocol event
                sendEvent := protofsm.DaemonEventSet{&protofsm.SendMsgEvent[ProtocolEvent]{ //nolint:ll
                        TargetPeer: env.ChanPeer,
                        // TODO(roasbeef): mew new func
                        Msgs: []lnwire.Message{&lnwire.ClosingComplete{
                                ChannelID:   env.ChanID,
                                FeeSatoshis: absoluteFee,
                                LockTime:    env.BlockHeight,
                                ClosingSigs: closingSigs,
                        }},
                }}

                chancloserLog.Infof("ChannelPoint(%v): sending closing sig "+
                        "to remote party, fee_sats=%v", env.ChanPoint,
                        absoluteFee)

                return &CloseStateTransition{
                        NextState: &LocalOfferSent{
                                ProposedFee:       absoluteFee,
                                LocalSig:          wireSig,
                                CloseChannelTerms: l.CloseChannelTerms,
                        },
                        NewEvents: fn.Some(RbfEvent{
                                ExternalEvents: sendEvent,
                        }),
                }, nil
        }

        return nil, fmt.Errorf("%w: received %T while in LocalCloseStart",
                ErrInvalidStateTransition, event)
}
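
// The three-way choice above (CloserNoClosee / NoCloserClosee /
// CloserAndClosee) just encodes which outputs survive on the signed close
// transaction. A minimal, self-contained sketch of that selection rule
// (hypothetical helper, simplified from the switch above):
func exampleSigVariant(remoteIsDust, localBelowDustAfterFees bool) string {
        switch {
        case remoteIsDust:
                // Only the closer's (our) output remains.
                return "closer_no_closee"

        case localBelowDustAfterFees:
                // Only the closee's (their) output remains.
                return "no_closer_closee"

        default:
                // Both outputs remain on the close transaction.
                return "closer_and_closee"
        }
}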

// extractSig extracts the expected signature from the closing sig message.
// Only one of them should actually be populated: since the closing sig message
// is sent in response to a ClosingComplete message, it should only sign the
// same version of the co-op close tx as the sender did.
func extractSig(msg lnwire.ClosingSig) fn.Result[lnwire.Sig] {
        // First, we'll validate that only one signature is included in their
        // response to our initial offer. If not, then we'll exit here, and
        // trigger a recycle of the connection.
        sigInts := []bool{
                msg.CloserNoClosee.IsSome(), msg.NoCloserClosee.IsSome(),
                msg.CloserAndClosee.IsSome(),
        }
        numSigs := fn.Foldl(0, sigInts, func(acc int, sigInt bool) int {
                if sigInt {
                        return acc + 1
                }

                return acc
        })
        if numSigs != 1 {
                return fn.Errf[lnwire.Sig]("%w: only one sig should be set, "+
                        "got %v", ErrTooManySigs, numSigs)
        }

        // The final sig is the one that's actually set.
        sig := msg.CloserAndClosee.ValOpt().Alt(
                msg.NoCloserClosee.ValOpt(),
        ).Alt(
                msg.CloserNoClosee.ValOpt(),
        )

        return fn.NewResult(sig.UnwrapOrErr(ErrNoSig))
}
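
// The validation above boils down to "exactly one of the three optional sig
// fields may be set". A minimal, self-contained sketch of that rule
// (hypothetical helper, not part of the lnwire API):
func exampleExactlyOneSet(flags ...bool) bool {
        count := 0
        for _, set := range flags {
                if set {
                        count++
                }
        }

        return count == 1
}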

// ProcessEvent implements the state transition function for the
// LocalOfferSent state. In this state, we'll wait for the remote party to
// send a closing_sig message which gives us the ability to broadcast a new
// co-op close transaction.
func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) { //nolint:gocritic
        // If we receive a LocalSigReceived event, then we'll attempt to
        // validate the signature from the remote party. If valid, then we can
        // broadcast the transaction, and transition to the ClosePending state.
        case *LocalSigReceived:
                // Extract and validate that only one sig field is set.
                sig, err := extractSig(msg.SigMsg).Unpack()
                if err != nil {
                        return nil, err
                }

                remoteSig, err := sig.ToSignature()
                if err != nil {
                        return nil, err
                }
                localSig, err := l.LocalSig.ToSignature()
                if err != nil {
                        return nil, err
                }

                // Now that we have their signature, we'll attempt to validate
                // it, then extract a valid closing signature from it.
                closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
                        localSig, remoteSig, l.LocalDeliveryScript,
                        l.RemoteDeliveryScript, l.ProposedFee,
                        lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
                        lnwallet.WithCustomPayer(lntypes.Local),
                )
                if err != nil {
                        return nil, err
                }

                // As we're about to broadcast a new version of the co-op close
                // transaction, we'll mark again as broadcast, but with this
                // variant of the co-op close tx.
                err = env.ChanObserver.MarkCoopBroadcasted(closeTx, true)
                if err != nil {
                        return nil, err
                }

                broadcastEvent := protofsm.DaemonEventSet{&protofsm.BroadcastTxn{ //nolint:ll
                        Tx: closeTx,
                        Label: labels.MakeLabel(
                                labels.LabelTypeChannelClose, &env.Scid,
                        ),
                }}

                chancloserLog.Infof("ChannelPoint(%v): received sig from "+
                        "remote party, broadcasting: tx=%v", env.ChanPoint,
                        lnutils.SpewLogClosure(closeTx),
                )

                return &CloseStateTransition{
                        NextState: &ClosePending{
                                CloseTx: closeTx,
                        },
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
                                ExternalEvents: broadcastEvent,
                        }),
                }, nil
        }

        return nil, fmt.Errorf("%w: received %T while in LocalOfferSent",
                ErrInvalidStateTransition, event)
}

// ProcessEvent implements the state transition function for the
// RemoteCloseStart. In this state, we'll wait for the remote party to send a
// closing_complete message. Assuming they can pay for the fees, we'll sign it
// ourselves, then transition to the next state of ClosePending.
func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) { //nolint:gocritic
        // If we receive an OfferReceived event, we'll make sure they can
        // actually pay for the fee. If so, then we'll counter sign and
        // transition to a terminal state.
        case *OfferReceivedEvent:
                // To start, we'll perform some basic validation of the sig
                // message they've sent. We'll validate that the remote party
                // actually has enough balance to pay the closing fees.
                if !l.RemoteCanPayFees(msg.SigMsg.FeeSatoshis) {
                        return nil, fmt.Errorf("%w: %v vs %v",
                                ErrRemoteCannotPay,
                                msg.SigMsg.FeeSatoshis,
                                l.RemoteBalance.ToSatoshis())
                }

                // With the basic sanity checks out of the way, we'll now
                // figure out which signature that we'll attempt to sign
                // against.
                var (
                        remoteSig input.Signature
                        noClosee  bool
                )
                switch {
                // If our balance is dust, then we expect the CloserNoClosee
                // sig to be set.
                case l.LocalAmtIsDust():
                        if msg.SigMsg.CloserNoClosee.IsNone() {
                                return nil, ErrCloserNoClosee
                        }
                        msg.SigMsg.CloserNoClosee.WhenSomeV(func(s lnwire.Sig) {
                                remoteSig, _ = s.ToSignature()
                                noClosee = true
                        })

                // Otherwise, we'll assume that CloserAndClosee is set.
                //
                // TODO(roasbeef): NoCloserClosee, but makes no sense?
                default:
                        if msg.SigMsg.CloserAndClosee.IsNone() {
                                return nil, ErrCloserAndClosee
                        }
                        msg.SigMsg.CloserAndClosee.WhenSomeV(func(s lnwire.Sig) { //nolint:ll
                                remoteSig, _ = s.ToSignature()
                        })
                }

                chanOpts := []lnwallet.ChanCloseOpt{
                        lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
                        lnwallet.WithCustomLockTime(msg.SigMsg.LockTime),
                        lnwallet.WithCustomPayer(lntypes.Remote),
                }

                chancloserLog.Infof("responding to close w/ local_addr=%x, "+
                        "remote_addr=%x, fee=%v",
                        l.LocalDeliveryScript[:], l.RemoteDeliveryScript[:],
                        msg.SigMsg.FeeSatoshis)

                // Now that we have the remote sig, we'll sign the version they
                // signed, then attempt to complete the cooperative close
                // process.
                //
                // TODO(roasbeef): need to be able to omit an output when
                // signing based on the above, as closing opt
                rawSig, _, _, err := env.CloseSigner.CreateCloseProposal(
                        msg.SigMsg.FeeSatoshis, l.LocalDeliveryScript,
                        l.RemoteDeliveryScript, chanOpts...,
                )
                if err != nil {
                        return nil, err
                }
                wireSig, err := lnwire.NewSigFromSignature(rawSig)
                if err != nil {
                        return nil, err
                }

                localSig, err := wireSig.ToSignature()
                if err != nil {
                        return nil, err
                }

                // With our signature created, we'll now attempt to finalize the
                // close process.
                closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
                        localSig, remoteSig, l.LocalDeliveryScript,
                        l.RemoteDeliveryScript, msg.SigMsg.FeeSatoshis,
                        chanOpts...,
                )
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("ChannelPoint(%v): received sig (fee=%v "+
                        "sats) from remote party, signing new tx=%v",
                        env.ChanPoint, msg.SigMsg.FeeSatoshis,
                        lnutils.SpewLogClosure(closeTx),
                )

                var closingSigs lnwire.ClosingSigs
                if noClosee {
                        closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
                                wireSig,
                        )
                } else {
                        closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
                                wireSig,
                        )
                }

                // As we're about to broadcast a new version of the co-op close
                // transaction, we'll mark again as broadcast, but with this
                // variant of the co-op close tx.
                //
                // TODO(roasbeef): db will only store one instance, store both?
                err = env.ChanObserver.MarkCoopBroadcasted(closeTx, false)
                if err != nil {
                        return nil, err
                }

                // As we transition, we'll emit two events: one to broadcast
                // the transaction, and the other to send our ClosingSig
                // message to the remote party.
                sendEvent := &protofsm.SendMsgEvent[ProtocolEvent]{
                        TargetPeer: env.ChanPeer,
                        Msgs: []lnwire.Message{&lnwire.ClosingSig{
                                ChannelID:   env.ChanID,
                                ClosingSigs: closingSigs,
                        }},
                }
                broadcastEvent := &protofsm.BroadcastTxn{
                        Tx: closeTx,
                        Label: labels.MakeLabel(
                                labels.LabelTypeChannelClose, &env.Scid,
                        ),
                }
                daemonEvents := protofsm.DaemonEventSet{
                        sendEvent, broadcastEvent,
                }

                // Now that we've extracted the signature, we'll transition to
                // the next state where we'll sign+broadcast the sig.
                return &CloseStateTransition{
                        NextState: &ClosePending{
                                CloseTx: closeTx,
                        },
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
                                ExternalEvents: daemonEvents,
                        }),
                }, nil
        }

        return nil, fmt.Errorf("%w: received %T while in RemoteCloseStart",
                ErrInvalidStateTransition, event)
}

// ProcessEvent is a semi-terminal state in the rbf-coop close state machine.
// In this state, we're waiting for either a confirmation, or for either side
// to attempt to create a new RBF'd co-op close transaction.
func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) {
        // If we see a spend while waiting for the close, then we'll go to our
        // terminal state.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        default:
                return &CloseStateTransition{
                        NextState: c,
                }, nil
        }
}

// ProcessEvent is the event processing for our terminal state. In this state,
// we just keep looping back on ourselves.
func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        return &CloseStateTransition{
                NextState: c,
        }, nil
}
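
// All of the states above share the same shape: ProcessEvent consumes one
// event and returns the next state plus any internal events to feed back in
// and any daemon (external) events to execute. The following is a minimal,
// self-contained sketch of that driver loop (hypothetical types, greatly
// simplified from the real protofsm package):
type exampleTransition struct {
        next     string
        internal []string
}

func exampleDrive(start string,
        step func(state, event string) exampleTransition,
        events ...string) string {

        state := start
        queue := append([]string{}, events...)
        for len(queue) > 0 {
                // Pop the next event and apply the transition function.
                event := queue[0]
                queue = queue[1:]

                out := step(state, event)
                state = out.next

                // Internal events are processed before any further external
                // input, mirroring how emitted internal events drive the
                // close FSM forward.
                merged := append([]string{}, out.internal...)
                queue = append(merged, queue...)
        }

        return state
}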