• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 13586005509

28 Feb 2025 10:14AM UTC coverage: 68.629% (+9.9%) from 58.77%
13586005509

Pull #9521

github

web-flow
Merge 37d3a70a5 into 8532955b3
Pull Request #9521: unit: remove GOACC, use Go 1.20 native coverage functionality

129950 of 189351 relevant lines covered (68.63%)

23726.46 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

82.74
/lnwallet/chancloser/rbf_coop_transitions.go
1
package chancloser
2

3
import (
4
        "fmt"
5

6
        "github.com/btcsuite/btcd/btcec/v2"
7
        "github.com/btcsuite/btcd/chaincfg"
8
        "github.com/btcsuite/btcd/mempool"
9
        "github.com/btcsuite/btcd/wire"
10
        "github.com/davecgh/go-spew/spew"
11
        "github.com/lightningnetwork/lnd/fn/v2"
12
        "github.com/lightningnetwork/lnd/input"
13
        "github.com/lightningnetwork/lnd/labels"
14
        "github.com/lightningnetwork/lnd/lntypes"
15
        "github.com/lightningnetwork/lnd/lnutils"
16
        "github.com/lightningnetwork/lnd/lnwallet"
17
        "github.com/lightningnetwork/lnd/lnwire"
18
        "github.com/lightningnetwork/lnd/protofsm"
19
        "github.com/lightningnetwork/lnd/tlv"
20
)
21

22
// sendShutdownEvents is a helper function that returns a set of daemon events
23
// we need to emit when we decide that we should send a shutdown message. We'll
24
// also mark the channel as borked as well, as at this point, we no longer want
25
// to continue with normal operation.
26
func sendShutdownEvents(chanID lnwire.ChannelID, chanPoint wire.OutPoint,
27
        deliveryAddr lnwire.DeliveryAddress, peerPub btcec.PublicKey,
28
        postSendEvent fn.Option[ProtocolEvent],
29
        chanState ChanStateObserver) (protofsm.DaemonEventSet, error) {
4✔
30

4✔
31
        // We'll emit a daemon event that instructs the daemon to send out a
4✔
32
        // new shutdown message to the remote peer.
4✔
33
        msgsToSend := &protofsm.SendMsgEvent[ProtocolEvent]{
4✔
34
                TargetPeer: peerPub,
4✔
35
                Msgs: []lnwire.Message{&lnwire.Shutdown{
4✔
36
                        ChannelID: chanID,
4✔
37
                        Address:   deliveryAddr,
4✔
38
                }},
4✔
39
                SendWhen: fn.Some(func() bool {
8✔
40
                        ok := chanState.NoDanglingUpdates()
4✔
41
                        if ok {
7✔
42
                                chancloserLog.Infof("ChannelPoint(%v): no "+
3✔
43
                                        "dangling updates sending shutdown "+
3✔
44
                                        "message", chanPoint)
3✔
45
                        }
3✔
46

47
                        return ok
4✔
48
                }),
49
                PostSendEvent: postSendEvent,
50
        }
51

52
        // If a close is already in process (we're in the RBF loop), then we
53
        // can skip everything below, and just send out the shutdown message.
54
        if chanState.FinalBalances().IsSome() {
6✔
55
                return protofsm.DaemonEventSet{msgsToSend}, nil
2✔
56
        }
2✔
57

58
        // Before closing, we'll attempt to send a disable update for the
59
        // channel.  We do so before closing the channel as otherwise the
60
        // current edge policy won't be retrievable from the graph.
61
        if err := chanState.DisableChannel(); err != nil {
2✔
62
                return nil, fmt.Errorf("unable to disable channel: %w", err)
×
63
        }
×
64

65
        // If we have a post-send event, then this means that we're the
66
        // responder. We'll use this fact below to update state in the DB.
67
        isInitiator := postSendEvent.IsNone()
2✔
68

2✔
69
        chancloserLog.Infof("ChannelPoint(%v): disabling outgoing adds",
2✔
70
                chanPoint)
2✔
71

2✔
72
        // As we're about to send a shutdown, we'll disable adds in the
2✔
73
        // outgoing direction.
2✔
74
        if err := chanState.DisableOutgoingAdds(); err != nil {
2✔
75
                return nil, fmt.Errorf("unable to disable outgoing "+
×
76
                        "adds: %w", err)
×
77
        }
×
78

79
        // To be able to survive a restart, we'll also write to disk
80
        // information about the shutdown we're about to send out.
81
        err := chanState.MarkShutdownSent(deliveryAddr, isInitiator)
2✔
82
        if err != nil {
2✔
83
                return nil, fmt.Errorf("unable to mark shutdown sent: %w", err)
×
84
        }
×
85

86
        chancloserLog.Debugf("ChannelPoint(%v): marking channel as borked",
2✔
87
                chanPoint)
2✔
88

2✔
89
        return protofsm.DaemonEventSet{msgsToSend}, nil
2✔
90
}
91

92
// validateShutdown is a helper function that validates that the shutdown has a
93
// proper delivery script, and can be sent based on the current thaw height of
94
// the channel.
95
func validateShutdown(chanThawHeight fn.Option[uint32],
96
        upfrontAddr fn.Option[lnwire.DeliveryAddress],
97
        msg *ShutdownReceived, chanPoint wire.OutPoint,
98
        chainParams chaincfg.Params) error {
5✔
99

5✔
100
        // If we've received a shutdown message, and we have a thaw height,
5✔
101
        // then we need to make sure that the channel can now be co-op closed.
5✔
102
        err := fn.MapOptionZ(chanThawHeight, func(thawHeight uint32) error {
5✔
103
                // If the current height is below the thaw height, then we'll
×
104
                // reject the shutdown message as we can't yet co-op close the
×
105
                // channel.
×
106
                if msg.BlockHeight < thawHeight {
×
107
                        return fmt.Errorf("initiator attempting to "+
×
108
                                "co-op close frozen ChannelPoint(%v) "+
×
109
                                "(current_height=%v, thaw_height=%v)",
×
110
                                chanPoint, msg.BlockHeight,
×
111
                                thawHeight)
×
112
                }
×
113

114
                return nil
×
115
        })
116
        if err != nil {
5✔
117
                return err
×
118
        }
×
119

120
        // Next, we'll verify that the remote party is sending the expected
121
        // shutdown script.
122
        return fn.MapOption(func(addr lnwire.DeliveryAddress) error {
7✔
123
                return validateShutdownScript(
2✔
124
                        addr, msg.ShutdownScript, &chainParams,
2✔
125
                )
2✔
126
        })(upfrontAddr).UnwrapOr(nil)
2✔
127
}
128

129
// ProcessEvent takes a protocol event, and implements a state transition for
130
// the state. From this state, we can receive two possible incoming events:
131
// SendShutdown and ShutdownReceived. Both of these will transition us to the
132
// ChannelFlushing state.
133
func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment,
134
) (*CloseStateTransition, error) {
7✔
135

7✔
136
        switch msg := event.(type) {
7✔
137
        // If we get a confirmation, then a prior transaction we broadcasted
138
        // has confirmed, so we can move to our terminal state early.
139
        case *SpendEvent:
1✔
140
                return &CloseStateTransition{
1✔
141
                        NextState: &CloseFin{
1✔
142
                                ConfirmedTx: msg.Tx,
1✔
143
                        },
1✔
144
                }, nil
1✔
145

146
        // If we receive the SendShutdown event, then we'll send our shutdown
147
        // with a special SendPredicate, then go to the ShutdownPending where
148
        // we'll wait for the remote to send their shutdown.
149
        case *SendShutdown:
3✔
150
                // If we have an upfront shutdown addr or a delivery addr then
3✔
151
                // we'll use that. Otherwise, we'll generate a new delivery
3✔
152
                // addr.
3✔
153
                shutdownScript, err := env.LocalUpfrontShutdown.Alt(
3✔
154
                        msg.DeliveryAddr,
3✔
155
                ).UnwrapOrFuncErr(env.NewDeliveryScript)
3✔
156
                if err != nil {
4✔
157
                        return nil, err
1✔
158
                }
1✔
159

160
                // We'll emit some daemon events to send the shutdown message
161
                // and disable the channel on the network level. In this case,
162
                // we don't need a post send event as receive their shutdown is
163
                // what'll move us beyond the ShutdownPending state.
164
                daemonEvents, err := sendShutdownEvents(
2✔
165
                        env.ChanID, env.ChanPoint, shutdownScript,
2✔
166
                        env.ChanPeer, fn.None[ProtocolEvent](),
2✔
167
                        env.ChanObserver,
2✔
168
                )
2✔
169
                if err != nil {
2✔
170
                        return nil, err
×
171
                }
×
172

173
                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg, "+
2✔
174
                        "delivery_script=%v", env.ChanPoint, shutdownScript)
2✔
175

2✔
176
                // From here, we'll transition to the shutdown pending state. In
2✔
177
                // this state we await their shutdown message (self loop), then
2✔
178
                // also the flushing event.
2✔
179
                return &CloseStateTransition{
2✔
180
                        NextState: &ShutdownPending{
2✔
181
                                IdealFeeRate: fn.Some(msg.IdealFeeRate),
2✔
182
                                ShutdownScripts: ShutdownScripts{
2✔
183
                                        LocalDeliveryScript: shutdownScript,
2✔
184
                                },
2✔
185
                        },
2✔
186
                        NewEvents: fn.Some(RbfEvent{
2✔
187
                                ExternalEvents: daemonEvents,
2✔
188
                        }),
2✔
189
                }, nil
2✔
190

191
        // When we receive a shutdown from the remote party, we'll validate the
192
        // shutdown message, then transition to the ShutdownPending state. We'll
193
        // also emit similar events like the above to send out shutdown, and
194
        // also disable the channel.
195
        case *ShutdownReceived:
2✔
196
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg")
2✔
197

2✔
198
                // Validate that they can send the message now, and also that
2✔
199
                // they haven't violated their commitment to a prior upfront
2✔
200
                // shutdown addr.
2✔
201
                err := validateShutdown(
2✔
202
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
2✔
203
                        env.ChanPoint, env.ChainParams,
2✔
204
                )
2✔
205
                if err != nil {
2✔
206
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
×
207
                                "shutdown attempt: %v", err)
×
208

×
209
                        return nil, err
×
210
                }
×
211

212
                // If we have an upfront shutdown addr we'll use that,
213
                // otherwise, we'll generate a new delivery script.
214
                shutdownAddr, err := env.LocalUpfrontShutdown.UnwrapOrFuncErr(
2✔
215
                        env.NewDeliveryScript,
2✔
216
                )
2✔
217
                if err != nil {
2✔
218
                        return nil, err
×
219
                }
×
220

221
                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg "+
2✔
222
                        "at next clean commit state", env.ChanPoint)
2✔
223

2✔
224
                // Now that we know the shutdown message is valid, we'll obtain
2✔
225
                // the set of daemon events we need to emit. We'll also specify
2✔
226
                // that once the message has actually been sent, that we
2✔
227
                // generate receive an input event of a ShutdownComplete.
2✔
228
                daemonEvents, err := sendShutdownEvents(
2✔
229
                        env.ChanID, env.ChanPoint, shutdownAddr,
2✔
230
                        env.ChanPeer,
2✔
231
                        fn.Some[ProtocolEvent](&ShutdownComplete{}),
2✔
232
                        env.ChanObserver,
2✔
233
                )
2✔
234
                if err != nil {
2✔
235
                        return nil, err
×
236
                }
×
237

238
                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
2✔
239
                        env.ChanPoint)
2✔
240

2✔
241
                // We just received a shutdown, so we'll disable the adds in
2✔
242
                // the outgoing direction.
2✔
243
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
2✔
244
                        return nil, fmt.Errorf("unable to disable incoming "+
×
245
                                "adds: %w", err)
×
246
                }
×
247

248
                remoteAddr := msg.ShutdownScript
2✔
249

2✔
250
                return &CloseStateTransition{
2✔
251
                        NextState: &ShutdownPending{
2✔
252
                                ShutdownScripts: ShutdownScripts{
2✔
253
                                        LocalDeliveryScript:  shutdownAddr,
2✔
254
                                        RemoteDeliveryScript: remoteAddr,
2✔
255
                                },
2✔
256
                        },
2✔
257
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
2✔
258
                                ExternalEvents: daemonEvents,
2✔
259
                        }),
2✔
260
                }, nil
2✔
261

262
        // Any other messages in this state will result in an error, as this is
263
        // an undefined state transition.
264
        default:
1✔
265
                return nil, fmt.Errorf("%w: received %T while in ChannelActive",
1✔
266
                        ErrInvalidStateTransition, msg)
1✔
267
        }
268
}
269

270
// ProcessEvent takes a protocol event, and implements a state transition for
271
// the state. Our path to this state will determine the set of valid events. If
272
// we were the one that sent the shutdown, then we'll just wait on the
273
// ShutdownReceived event. Otherwise, we received the shutdown, and can move
274
// forward once we receive the ShutdownComplete event. Receiving
275
// ShutdownComplete means that we've sent our shutdown, as this was specified
276
// as a post send event.
277
func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment,
278
) (*CloseStateTransition, error) {
7✔
279

7✔
280
        switch msg := event.(type) {
7✔
281
        // If we get a confirmation, then a prior transaction we broadcasted
282
        // has confirmed, so we can move to our terminal state early.
283
        case *SpendEvent:
1✔
284
                return &CloseStateTransition{
1✔
285
                        NextState: &CloseFin{
1✔
286
                                ConfirmedTx: msg.Tx,
1✔
287
                        },
1✔
288
                }, nil
1✔
289

290
        // When we receive a shutdown from the remote party, we'll validate the
291
        // shutdown message, then transition to the ChannelFlushing state.
292
        case *ShutdownReceived:
3✔
293
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
3✔
294
                        env.ChanPoint)
3✔
295

3✔
296
                // Validate that they can send the message now, and also that
3✔
297
                // they haven't violated their commitment to a prior upfront
3✔
298
                // shutdown addr.
3✔
299
                err := validateShutdown(
3✔
300
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
3✔
301
                        env.ChanPoint, env.ChainParams,
3✔
302
                )
3✔
303
                if err != nil {
4✔
304
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
1✔
305
                                "shutdown attempt: %v", err)
1✔
306

1✔
307
                        return nil, err
1✔
308
                }
1✔
309

310
                // If the channel is *already* flushed, and the close is
311
                // go straight into negotiation, as this is the RBF loop.
312
                // already in progress, then we can skip the flushing state and
313
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
2✔
314
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
2✔
315
                        unknownBalance,
2✔
316
                )
2✔
317
                if finalBalances != unknownBalance {
3✔
318
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
1✔
319
                                ShutdownBalances: finalBalances,
1✔
320
                        })
1✔
321
                        eventsToEmit = fn.Some(RbfEvent{
1✔
322
                                InternalEvent: []ProtocolEvent{
1✔
323
                                        channelFlushed,
1✔
324
                                },
1✔
325
                        })
1✔
326
                }
1✔
327

328
                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
2✔
329
                        env.ChanPoint)
2✔
330

2✔
331
                // We just received a shutdown, so we'll disable the adds in
2✔
332
                // the outgoing direction.
2✔
333
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
2✔
334
                        return nil, fmt.Errorf("unable to disable incoming "+
×
335
                                "adds: %w", err)
×
336
                }
×
337

338
                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
2✔
339
                        "be flushed...", env.ChanPoint)
2✔
340

2✔
341
                // We transition to the ChannelFlushing state, where we await
2✔
342
                // the ChannelFlushed event.
2✔
343
                return &CloseStateTransition{
2✔
344
                        NextState: &ChannelFlushing{
2✔
345
                                IdealFeeRate: s.IdealFeeRate,
2✔
346
                                ShutdownScripts: ShutdownScripts{
2✔
347
                                        LocalDeliveryScript:  s.LocalDeliveryScript, //nolint:ll
2✔
348
                                        RemoteDeliveryScript: msg.ShutdownScript,    //nolint:ll
2✔
349
                                },
2✔
350
                        },
2✔
351
                        NewEvents: eventsToEmit,
2✔
352
                }, nil
2✔
353

354
        // If we get this message, then this means that we were finally able to
355
        // send out shutdown after receiving it from the remote party. We'll
356
        // now transition directly to the ChannelFlushing state.
357
        case *ShutdownComplete:
2✔
358
                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
2✔
359
                        "be flushed...", env.ChanPoint)
2✔
360

2✔
361
                // If the channel is *already* flushed, and the close is
2✔
362
                // already in progress, then we can skip the flushing state and
2✔
363
                // go straight into negotiation, as this is the RBF loop.
2✔
364
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
2✔
365
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
2✔
366
                        unknownBalance,
2✔
367
                )
2✔
368
                if finalBalances != unknownBalance {
3✔
369
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
1✔
370
                                ShutdownBalances: finalBalances,
1✔
371
                        })
1✔
372
                        eventsToEmit = fn.Some(RbfEvent{
1✔
373
                                InternalEvent: []ProtocolEvent{
1✔
374
                                        channelFlushed,
1✔
375
                                },
1✔
376
                        })
1✔
377
                }
1✔
378

379
                // From here, we'll transition to the channel flushing state.
380
                // We'll stay here until we receive the ChannelFlushed event.
381
                return &CloseStateTransition{
2✔
382
                        NextState: &ChannelFlushing{
2✔
383
                                IdealFeeRate:    s.IdealFeeRate,
2✔
384
                                ShutdownScripts: s.ShutdownScripts,
2✔
385
                        },
2✔
386
                        NewEvents: eventsToEmit,
2✔
387
                }, nil
2✔
388

389
        // Any other messages in this state will result in an error, as this is
390
        // an undefined state transition.
391
        default:
1✔
392
                return nil, fmt.Errorf("%w: received %T while in "+
1✔
393
                        "ShutdownPending", ErrInvalidStateTransition, msg)
1✔
394
        }
395
}
396

397
// ProcessEvent takes a new protocol event, and figures out if we can
// transition to the next state, or just loop back upon ourself. If we receive
// a ShutdownReceived event, then we'll stay in the ChannelFlushing state, as
// we haven't yet fully cleared the channel. Otherwise, we can move to the
// CloseReady state which'll begin the channel closing process.
func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) {
	// If we get a confirmation, then a prior transaction we broadcasted
	// has confirmed, so we can move to our terminal state early.
	case *SpendEvent:
		return &CloseStateTransition{
			NextState: &CloseFin{
				ConfirmedTx: msg.Tx,
			},
		}, nil

	// If we get an OfferReceived event, then the channel is flushed from
	// the PoV of the remote party. However, due to propagation delay or
	// concurrency, we may not have received the ChannelFlushed event yet.
	// In this case, we'll stash the event and wait for the ChannelFlushed
	// event.
	case *OfferReceivedEvent:
		chancloserLog.Infof("ChannelPoint(%v): received remote offer "+
			"early, stashing...", env.ChanPoint)

		// Stash a copy of the offer; it's replayed as an internal
		// event once ChannelFlushed arrives (see below).
		c.EarlyRemoteOffer = fn.Some(*msg)

		// TODO(roasbeef): unit test!
		//  * actually do this ^

		// We'll perform a noop update so we can wait for the actual
		// channel flushed event.
		return &CloseStateTransition{
			NextState: c,
		}, nil

	// If we receive the ChannelFlushed event, then the coast is clear so
	// we'll now morph into the dual peer state so we can handle any
	// messages needed to drive forward the close process.
	case *ChannelFlushed:
		// Both the local and remote closing negotiation need the terms
		// we'll be using to close the channel, so we'll create them
		// here.
		closeTerms := CloseChannelTerms{
			ShutdownScripts:  c.ShutdownScripts,
			ShutdownBalances: msg.ShutdownBalances,
		}

		chancloserLog.Infof("ChannelPoint(%v): channel flushed! "+
			"proceeding with co-op close", env.ChanPoint)

		// Now that the channel has been flushed, we'll mark on disk
		// that we're approaching the point of no return where we'll
		// send a new signature to the remote party. Only done on a
		// fresh flush so RBF iterations don't re-mark the channel.
		//
		// TODO(roasbeef): doesn't actually matter if initiator here?
		if msg.FreshFlush {
			err := env.ChanObserver.MarkCoopBroadcasted(nil, true)
			if err != nil {
				return nil, err
			}
		}

		// If an ideal fee rate was specified, then we'll use that,
		// otherwise we'll fall back to the default value given in the
		// env.
		idealFeeRate := c.IdealFeeRate.UnwrapOr(env.DefaultFeeRate)

		// We'll then use that fee rate to determine the absolute fee
		// we'd propose.
		//
		// TODO(roasbeef): need to sign the 3 diff versions of this?
		localTxOut, remoteTxOut := closeTerms.DeriveCloseTxOuts()
		absoluteFee := env.FeeEstimator.EstimateFee(
			env.ChanType, localTxOut, remoteTxOut,
			idealFeeRate.FeePerKWeight(),
		)

		chancloserLog.Infof("ChannelPoint(%v): using ideal_fee=%v, "+
			"absolute_fee=%v", env.ChanPoint, idealFeeRate,
			absoluteFee)

		var (
			internalEvents []ProtocolEvent
			newEvents      fn.Option[RbfEvent]
		)

		// If we received a remote offer early from the remote party,
		// then we'll add that to the set of internal events to emit.
		c.EarlyRemoteOffer.WhenSome(func(offer OfferReceivedEvent) {
			internalEvents = append(internalEvents, &offer)
		})

		// Only if we have enough funds to pay for the fees do we need
		// to emit a localOfferSign event.
		//
		// TODO(roasbeef): also only proceed if was higher than fee in
		// last round?
		if closeTerms.LocalCanPayFees(absoluteFee) {
			// Each time we go into this negotiation flow, we'll
			// kick off our local state with a new close attempt.
			// So we'll emit an internal event to drive forward that
			// part of the state.
			localOfferSign := ProtocolEvent(&SendOfferEvent{
				TargetFeeRate: idealFeeRate,
			})
			internalEvents = append(internalEvents, localOfferSign)
		} else {
			chancloserLog.Infof("ChannelPoint(%v): unable to pay "+
				"fees with local balance, skipping "+
				"closing_complete", env.ChanPoint)
		}

		if len(internalEvents) > 0 {
			newEvents = fn.Some(RbfEvent{
				InternalEvent: internalEvents,
			})
		}

		// Enter ClosingNegotiation with symmetric local/remote
		// sub-states, both seeded with the same close terms.
		return &CloseStateTransition{
			NextState: &ClosingNegotiation{
				PeerState: lntypes.Dual[AsymmetricPeerState]{
					Local: &LocalCloseStart{
						CloseChannelTerms: closeTerms,
					},
					Remote: &RemoteCloseStart{
						CloseChannelTerms: closeTerms,
					},
				},
			},
			NewEvents: newEvents,
		}, nil

	// Any other messages in this state will result in an error, as this is
	// an undefined state transition.
	default:
		return nil, fmt.Errorf("%w: received %T while in "+
			"ChannelFlushing", ErrInvalidStateTransition, msg)
	}
}
537

538
// processNegotiateEvent is a helper function that processes a new event to
539
// local channel state once we're in the ClosingNegotiation state.
540
func processNegotiateEvent(c *ClosingNegotiation, event ProtocolEvent,
541
        env *Environment, chanPeer lntypes.ChannelParty,
542
) (*CloseStateTransition, error) {
17✔
543

17✔
544
        targetPeerState := c.PeerState.GetForParty(chanPeer)
17✔
545

17✔
546
        // Drive forward the remote state based on the next event.
17✔
547
        transition, err := targetPeerState.ProcessEvent(
17✔
548
                event, env,
17✔
549
        )
17✔
550
        if err != nil {
21✔
551
                return nil, err
4✔
552
        }
4✔
553

554
        nextPeerState, ok := transition.NextState.(AsymmetricPeerState) //nolint:ll
13✔
555
        if !ok {
13✔
556
                return nil, fmt.Errorf("expected %T to be "+
×
557
                        "AsymmetricPeerState", transition.NextState)
×
558
        }
×
559

560
        // Make a copy of the input state, then update the peer state of the
561
        // proper party.
562
        newPeerState := *c
13✔
563
        newPeerState.PeerState.SetForParty(chanPeer, nextPeerState)
13✔
564

13✔
565
        return &CloseStateTransition{
13✔
566
                NextState: &newPeerState,
13✔
567
                NewEvents: transition.NewEvents,
13✔
568
        }, nil
13✔
569
}
570

571
// ProcessEvent drives forward the composite states for the local and remote
572
// party in response to new events. From this state, we'll continue to drive
573
// forward the local and remote states until we arrive at the StateFin stage,
574
// or we loop back up to the ShutdownPending state.
575
func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment,
576
) (*CloseStateTransition, error) {
19✔
577

19✔
578
        // There're two classes of events that can break us out of this state:
19✔
579
        // we receive a confirmation event, or we receive a signal to restart
19✔
580
        // the co-op close process.
19✔
581
        switch msg := event.(type) {
19✔
582
        // If we get a confirmation, then the spend request we issued when we
583
        // were leaving the ChannelFlushing state has been confirmed.  We'll
584
        // now transition to the StateFin state.
585
        case *SpendEvent:
×
586
                return &CloseStateTransition{
×
587
                        NextState: &CloseFin{
×
588
                                ConfirmedTx: msg.Tx,
×
589
                        },
×
590
                }, nil
×
591

592
        // Otherwise, if we receive a shutdown, or receive an event to send a
593
        // shutdown, then we'll go back up to the ChannelActive state, and have
594
        // it handle this event by emitting an internal event.
595
        //
596
        // TODO(roasbeef): both will have fee rate specified, so ok?
597
        case *ShutdownReceived, *SendShutdown:
2✔
598
                chancloserLog.Infof("ChannelPoint(%v): RBF case triggered, "+
2✔
599
                        "restarting negotiation", env.ChanPoint)
2✔
600

2✔
601
                return &CloseStateTransition{
2✔
602
                        NextState: &ChannelActive{},
2✔
603
                        NewEvents: fn.Some(RbfEvent{
2✔
604
                                InternalEvent: []ProtocolEvent{event},
2✔
605
                        }),
2✔
606
                }, nil
2✔
607
        }
608

609
        // If we get to this point, then we have an event that'll drive forward
610
        // the negotiation process.  Based on the event, we'll figure out which
611
        // state we'll be modifying.
612
        switch {
17✔
613
        case c.PeerState.GetForParty(lntypes.Local).ShouldRouteTo(event):
12✔
614
                chancloserLog.Infof("ChannelPoint(%v): routing %T to local "+
12✔
615
                        "chan state", env.ChanPoint, event)
12✔
616

12✔
617
                // Drive forward the local state based on the next event.
12✔
618
                return processNegotiateEvent(c, event, env, lntypes.Local)
12✔
619

620
        case c.PeerState.GetForParty(lntypes.Remote).ShouldRouteTo(event):
5✔
621
                chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+
5✔
622
                        "chan state", env.ChanPoint, event)
5✔
623

5✔
624
                // Drive forward the remote state based on the next event.
5✔
625
                return processNegotiateEvent(c, event, env, lntypes.Remote)
5✔
626
        }
627

628
        return nil, fmt.Errorf("%w: received %T while in ClosingNegotiation",
×
629
                ErrInvalidStateTransition, event)
×
630
}
631

632
// newSigTlv is a helper function that returns a new optional TLV sig field for
633
// the parametrized tlv.TlvType value.
634
func newSigTlv[T tlv.TlvType](s lnwire.Sig) tlv.OptionalRecordT[T, lnwire.Sig] {
18✔
635
        return tlv.SomeRecordT(tlv.NewRecordT[T](s))
18✔
636
}
18✔
637

638
// ProcessEvent implements the event processing to kick off the process of
// obtaining a new (possibly RBF'd) signature for our commitment transaction.
func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) { //nolint:gocritic
	// If we receive a SendOfferEvent, then we'll use the specified fee
	// rate to generate a signature for the closing transaction with our
	// ideal fee rate.
	case *SendOfferEvent:
		// First, we'll figure out the absolute fee rate we should pay
		// given the state of the local/remote outputs.
		localTxOut, remoteTxOut := l.DeriveCloseTxOuts()
		absoluteFee := env.FeeEstimator.EstimateFee(
			env.ChanType, localTxOut, remoteTxOut,
			msg.TargetFeeRate.FeePerKWeight(),
		)

		// Now that we know what fee we want to pay, we'll create a new
		// signature over our co-op close transaction. For our
		// proposals, we'll just always use the known RBF sequence
		// value.
		localScript := l.LocalDeliveryScript
		rawSig, closeTx, closeBalance, err := env.CloseSigner.CreateCloseProposal( //nolint:ll
			absoluteFee, localScript, l.RemoteDeliveryScript,
			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
			lnwallet.WithCustomPayer(lntypes.Local),
		)
		if err != nil {
			return nil, err
		}
		wireSig, err := lnwire.NewSigFromSignature(rawSig)
		if err != nil {
			return nil, err
		}

		chancloserLog.Infof("closing w/ local_addr=%x, "+
			"remote_addr=%x, fee=%v", localScript[:],
			l.RemoteDeliveryScript[:], absoluteFee)

		chancloserLog.Infof("proposing closing_tx=%v",
			spew.Sdump(closeTx))

		// Now that we have our signature, we'll set the proper
		// closingSigs field based on if the remote party's output is
		// dust or not.
		var closingSigs lnwire.ClosingSigs
		switch {
		// If the remote party's output is dust, then we'll set the
		// CloserNoClosee field.
		case remoteTxOut == nil:
			closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
				wireSig,
			)

		// If after paying for fees, our balance is below dust, then
		// we'll set the NoCloserClosee field.
		case closeBalance < lnwallet.DustLimitForSize(len(localScript)):
			closingSigs.NoCloserClosee = newSigTlv[tlv.TlvType2](
				wireSig,
			)

		// Otherwise, we'll set the CloserAndClosee field.
		//
		// TODO(roasbeef): should actually set both??
		default:
			closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
				wireSig,
			)
		}

		// Now that we have our sig, we'll emit a daemon event to send
		// it to the remote party, then transition to the
		// LocalOfferSent state.
		//
		// TODO(roasbeef): type alias for protocol event
		sendEvent := protofsm.DaemonEventSet{&protofsm.SendMsgEvent[ProtocolEvent]{ //nolint:ll
			TargetPeer: env.ChanPeer,
			// TODO(roasbeef): new constructor func
			Msgs: []lnwire.Message{&lnwire.ClosingComplete{
				ChannelID:   env.ChanID,
				FeeSatoshis: absoluteFee,
				LockTime:    env.BlockHeight,
				ClosingSigs: closingSigs,
			}},
		}}

		chancloserLog.Infof("ChannelPoint(%v): sending closing sig "+
			"to remote party, fee_sats=%v", env.ChanPoint,
			absoluteFee)

		// Transition to LocalOfferSent, carrying the proposed fee and
		// our sig forward so they can be combined with the remote
		// party's eventual ClosingSig response.
		return &CloseStateTransition{
			NextState: &LocalOfferSent{
				ProposedFee:       absoluteFee,
				LocalSig:          wireSig,
				CloseChannelTerms: l.CloseChannelTerms,
			},
			NewEvents: fn.Some(RbfEvent{
				ExternalEvents: sendEvent,
			}),
		}, nil
	}

	// Any other event is invalid while in this state.
	return nil, fmt.Errorf("%w: received %T while in LocalCloseStart",
		ErrInvalidStateTransition, event)
}
744

745
// extractSig extracts the expected signature from the closing sig message.
746
// Only one of them should actually be populated as the closing sig message is
747
// sent in response to a ClosingComplete message, it should only sign the same
748
// version of the co-op close tx as the sender did.
749
func extractSig(msg lnwire.ClosingSig) fn.Result[lnwire.Sig] {
4✔
750
        // First, we'll validate that only one signature is included in their
4✔
751
        // response to our initial offer. If not, then we'll exit here, and
4✔
752
        // trigger a recycle of the connection.
4✔
753
        sigInts := []bool{
4✔
754
                msg.CloserNoClosee.IsSome(), msg.NoCloserClosee.IsSome(),
4✔
755
                msg.CloserAndClosee.IsSome(),
4✔
756
        }
4✔
757
        numSigs := fn.Foldl(0, sigInts, func(acc int, sigInt bool) int {
16✔
758
                if sigInt {
17✔
759
                        return acc + 1
5✔
760
                }
5✔
761

762
                return acc
7✔
763
        })
764
        if numSigs != 1 {
5✔
765
                return fn.Errf[lnwire.Sig]("%w: only one sig should be set, "+
1✔
766
                        "got %v", ErrTooManySigs, numSigs)
1✔
767
        }
1✔
768

769
        // The final sig is the one that's actually set.
770
        sig := msg.CloserAndClosee.ValOpt().Alt(
3✔
771
                msg.NoCloserClosee.ValOpt(),
3✔
772
        ).Alt(
3✔
773
                msg.CloserNoClosee.ValOpt(),
3✔
774
        )
3✔
775

3✔
776
        return fn.NewResult(sig.UnwrapOrErr(ErrNoSig))
3✔
777
}
778

779
// ProcessEvent implements the state transition function for the
// LocalOfferSent state. In this state, we'll wait for the remote party to
// send a close_signed message which gives us the ability to broadcast a new
// co-op close transaction.
func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) { //nolint:gocritic
	// If we receive a LocalSigReceived event, then we'll attempt to
	// validate the signature from the remote party. If valid, then we can
	// broadcast the transaction, and transition to the ClosePending state.
	case *LocalSigReceived:
		// Extract and validate that only one sig field is set.
		sig, err := extractSig(msg.SigMsg).Unpack()
		if err != nil {
			return nil, err
		}

		// Convert both the remote party's wire sig and our stored
		// local wire sig into raw signatures we can combine below.
		remoteSig, err := sig.ToSignature()
		if err != nil {
			return nil, err
		}
		localSig, err := l.LocalSig.ToSignature()
		if err != nil {
			return nil, err
		}

		// Now that we have their signature, we'll attempt to validate
		// it, then extract a valid closing signature from it. The same
		// RBF sequence we used when proposing must be used here so the
		// signed tx matches the version we offered.
		closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
			localSig, remoteSig, l.LocalDeliveryScript,
			l.RemoteDeliveryScript, l.ProposedFee,
			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
			lnwallet.WithCustomPayer(lntypes.Local),
		)
		if err != nil {
			return nil, err
		}

		// As we're about to broadcast a new version of the co-op close
		// transaction, we'll mark again as broadcast, but with this
		// variant of the co-op close tx.
		err = env.ChanObserver.MarkCoopBroadcasted(closeTx, true)
		if err != nil {
			return nil, err
		}

		// Emit a daemon event so the final close tx gets broadcast
		// with a labeled channel-close wallet entry.
		broadcastEvent := protofsm.DaemonEventSet{&protofsm.BroadcastTxn{ //nolint:ll
			Tx: closeTx,
			Label: labels.MakeLabel(
				labels.LabelTypeChannelClose, &env.Scid,
			),
		}}

		chancloserLog.Infof("ChannelPoint(%v): received sig from "+
			"remote party, broadcasting: tx=%v", env.ChanPoint,
			lnutils.SpewLogClosure(closeTx),
		)

		return &CloseStateTransition{
			NextState: &ClosePending{
				CloseTx: closeTx,
			},
			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
				ExternalEvents: broadcastEvent,
			}),
		}, nil
	}

	// Any other event is invalid while in this state.
	return nil, fmt.Errorf("%w: received %T while in LocalOfferSent",
		ErrInvalidStateTransition, event)
}
851

852
// ProcessEvent implements the state transition function for the
// RemoteCloseStart. In this state, we'll wait for the remote party to send a
// closing_complete message. Assuming they can pay for the fees, we'll sign it
// ourselves, then transition to the next state of ClosePending.
func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) { //nolint:gocritic
	// If we receive a OfferReceived event, we'll make sure they can
	// actually pay for the fee. If so, then we'll counter sign and
	// transition to a terminal state.
	case *OfferReceivedEvent:
		// To start, we'll perform some basic validation of the sig
		// message they've sent. We'll validate that the remote party
		// actually has enough fees to pay the closing fees.
		if !l.RemoteCanPayFees(msg.SigMsg.FeeSatoshis) {
			return nil, fmt.Errorf("%w: %v vs %v",
				ErrRemoteCannotPay,
				msg.SigMsg.FeeSatoshis,
				l.RemoteBalance.ToSatoshis())
		}

		// With the basic sanity checks out of the way, we'll now
		// figure out which signature that we'll attempt to sign
		// against. noClosee tracks which close tx variant the remote
		// party signed, so we can respond with the matching sig field
		// below.
		var (
			remoteSig input.Signature
			noClosee  bool
		)
		switch {
		// If our balance is dust, then we expect the CloserNoClosee
		// sig to be set.
		case l.LocalAmtIsDust():
			if msg.SigMsg.CloserNoClosee.IsNone() {
				return nil, ErrCloserNoClosee
			}
			msg.SigMsg.CloserNoClosee.WhenSomeV(func(s lnwire.Sig) {
				// NOTE(review): the ToSignature error is
				// discarded here; a malformed sig surfaces
				// later when the close is completed.
				remoteSig, _ = s.ToSignature()
				noClosee = true
			})

		// Otherwise, we'll assume that CloseAndClosee is set.
		//
		// TODO(roasbeef): NoCloserClosee, but makes no sense?
		default:
			if msg.SigMsg.CloserAndClosee.IsNone() {
				return nil, ErrCloserAndClosee
			}
			msg.SigMsg.CloserAndClosee.WhenSomeV(func(s lnwire.Sig) { //nolint:ll
				remoteSig, _ = s.ToSignature()
			})
		}

		// We sign with the RBF sequence, the lock time the remote
		// party picked, and mark them as the party paying the fee.
		chanOpts := []lnwallet.ChanCloseOpt{
			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
			lnwallet.WithCustomLockTime(msg.SigMsg.LockTime),
			lnwallet.WithCustomPayer(lntypes.Remote),
		}

		chancloserLog.Infof("responding to close w/ local_addr=%x, "+
			"remote_addr=%x, fee=%v",
			l.LocalDeliveryScript[:], l.RemoteDeliveryScript[:],
			msg.SigMsg.FeeSatoshis)

		// Now that we have the remote sig, we'll sign the version they
		// signed, then attempt to complete the cooperative close
		// process.
		//
		// TODO(roasbeef): need to be able to omit an output when
		// signing based on the above, as closing opt
		rawSig, _, _, err := env.CloseSigner.CreateCloseProposal(
			msg.SigMsg.FeeSatoshis, l.LocalDeliveryScript,
			l.RemoteDeliveryScript, chanOpts...,
		)
		if err != nil {
			return nil, err
		}
		wireSig, err := lnwire.NewSigFromSignature(rawSig)
		if err != nil {
			return nil, err
		}

		localSig, err := wireSig.ToSignature()
		if err != nil {
			return nil, err
		}

		// With our signature created, we'll now attempt to finalize the
		// close process.
		closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
			localSig, remoteSig, l.LocalDeliveryScript,
			l.RemoteDeliveryScript, msg.SigMsg.FeeSatoshis,
			chanOpts...,
		)
		if err != nil {
			return nil, err
		}

		chancloserLog.Infof("ChannelPoint(%v): received sig (fee=%v "+
			"sats) from remote party, signing new tx=%v",
			env.ChanPoint, msg.SigMsg.FeeSatoshis,
			lnutils.SpewLogClosure(closeTx),
		)

		// Populate the sig field that mirrors the variant the remote
		// party signed, so both sides sign the same close tx version.
		var closingSigs lnwire.ClosingSigs
		if noClosee {
			closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
				wireSig,
			)
		} else {
			closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
				wireSig,
			)
		}

		// As we're about to broadcast a new version of the co-op close
		// transaction, we'll mark again as broadcast, but with this
		// variant of the co-op close tx.
		//
		// TODO(roasbeef): db will only store one instance, store both?
		err = env.ChanObserver.MarkCoopBroadcasted(closeTx, false)
		if err != nil {
			return nil, err
		}

		// As we transition, we'll emit two events: one to broadcast
		// the transaction, and the other to send our ClosingSig
		// message to the remote party.
		sendEvent := &protofsm.SendMsgEvent[ProtocolEvent]{
			TargetPeer: env.ChanPeer,
			Msgs: []lnwire.Message{&lnwire.ClosingSig{
				ChannelID:   env.ChanID,
				ClosingSigs: closingSigs,
			}},
		}
		broadcastEvent := &protofsm.BroadcastTxn{
			Tx: closeTx,
			Label: labels.MakeLabel(
				labels.LabelTypeChannelClose, &env.Scid,
			),
		}
		daemonEvents := protofsm.DaemonEventSet{
			sendEvent, broadcastEvent,
		}

		// Now that we've extracted the signature, we'll transition to
		// the next state where we'll sign+broadcast the sig.
		return &CloseStateTransition{
			NextState: &ClosePending{
				CloseTx: closeTx,
			},
			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
				ExternalEvents: daemonEvents,
			}),
		}, nil
	}

	// Any other event is invalid while in this state.
	return nil, fmt.Errorf("%w: received %T while in RemoteCloseStart",
		ErrInvalidStateTransition, event)
}
1012

1013
// ProcessEvent is a semi-terminal state in the rbf-coop close state machine.
1014
// In this state, we're waiting for either a confirmation, or for either side
1015
// to attempt to create a new RBF'd co-op close transaction.
1016
func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment,
1017
) (*CloseStateTransition, error) {
×
1018

×
1019
        switch msg := event.(type) {
×
1020
        // If we can a spend while waiting for the close, then we'll go to our
1021
        // terminal state.
1022
        case *SpendEvent:
×
1023
                return &CloseStateTransition{
×
1024
                        NextState: &CloseFin{
×
1025
                                ConfirmedTx: msg.Tx,
×
1026
                        },
×
1027
                }, nil
×
1028

1029
        default:
×
1030

×
1031
                return &CloseStateTransition{
×
1032
                        NextState: c,
×
1033
                }, nil
×
1034
        }
1035
}
1036

1037
// ProcessEvent is the event processing for our terminal state. In this state,
// we just keep looping back on ourselves: every event results in a self
// transition with no new events emitted.
func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	return &CloseStateTransition{
		NextState: c,
	}, nil
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc