• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 11965896165

22 Nov 2024 03:26AM UTC coverage: 49.579% (-9.4%) from 58.98%
11965896165

Pull #8512

github

Roasbeef
lnwallet/chancloser: add unit tests for new rbf coop close
Pull Request #8512: [3/4] - lnwallet/chancloser: add new protofsm based RBF chan closer

30 of 1081 new or added lines in 8 files covered. (2.78%)

25859 existing lines in 424 files now uncovered.

99910 of 201515 relevant lines covered (49.58%)

2.06 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/lnwallet/chancloser/rbf_coop_transitions.go
1
package chancloser
2

3
import (
4
        "fmt"
5

6
        "github.com/btcsuite/btcd/btcec/v2"
7
        "github.com/btcsuite/btcd/chaincfg"
8
        "github.com/btcsuite/btcd/mempool"
9
        "github.com/btcsuite/btcd/wire"
10
        "github.com/lightningnetwork/lnd/fn"
11
        "github.com/lightningnetwork/lnd/input"
12
        "github.com/lightningnetwork/lnd/labels"
13
        "github.com/lightningnetwork/lnd/lnutils"
14
        "github.com/lightningnetwork/lnd/lnwallet"
15
        "github.com/lightningnetwork/lnd/lnwire"
16
        "github.com/lightningnetwork/lnd/protofsm"
17
        "github.com/lightningnetwork/lnd/tlv"
18
)
19

20
// sendShutdownEvents is a helper function that returns a set of daemon events
21
// we need to emit when we decide that we should send a shutdown message. We'll
22
// also mark the channel as borked as well, as at this point, we no longer want
23
// to continue with normal operation.
24
func sendShutdownEvents(chanID lnwire.ChannelID, chanPoint wire.OutPoint,
25
        deliveryAddr lnwire.DeliveryAddress, peerPub btcec.PublicKey,
26
        postSendEvent fn.Option[ProtocolEvent],
NEW
27
        chanState ChanStateObserver) (protofsm.DaemonEventSet, error) {
×
NEW
28

×
NEW
29
        // We'll emit a daemon event that instructs the daemon to send out a
×
NEW
30
        // new shutdown message to the remote peer.
×
NEW
31
        msgsToSend := &protofsm.SendMsgEvent[ProtocolEvent]{
×
NEW
32
                TargetPeer: peerPub,
×
NEW
33
                Msgs: []lnwire.Message{&lnwire.Shutdown{
×
NEW
34
                        ChannelID: chanID,
×
NEW
35
                        Address:   deliveryAddr,
×
NEW
36
                }},
×
NEW
37
                SendWhen: fn.Some(func() bool {
×
NEW
38
                        ok := chanState.NoDanglingUpdates()
×
NEW
39
                        if ok {
×
NEW
40
                                chancloserLog.Infof("ChannelPoint(%v): no "+
×
NEW
41
                                        "dangling updates sending shutdown "+
×
NEW
42
                                        "message", chanPoint)
×
NEW
43
                        }
×
NEW
44
                        return ok
×
45
                }),
46
                PostSendEvent: postSendEvent,
47
        }
48

49
        // If a close is already in process (we're in the RBF loop), then we
50
        // can skip everything below, and just send out the shutdown message.
NEW
51
        if chanState.FinalBalances().IsSome() {
×
NEW
52
                return protofsm.DaemonEventSet{msgsToSend}, nil
×
NEW
53
        }
×
54

55
        // Before closing, we'll attempt to send a disable update for the
56
        // channel.  We do so before closing the channel as otherwise the
57
        // current edge policy won't be retrievable from the graph.
NEW
58
        if err := chanState.DisableChannel(); err != nil {
×
NEW
59
                return nil, fmt.Errorf("unable to disable channel: %w", err)
×
NEW
60
        }
×
61

62
        // If we have a post-send event, then this means that we're the
63
        // responder. We'll use this fact below to update state in the DB.
NEW
64
        isInitiator := postSendEvent.IsNone()
×
NEW
65

×
NEW
66
        chancloserLog.Infof("ChannelPoint(%v): disabling outgoing adds",
×
NEW
67
                chanPoint)
×
NEW
68

×
NEW
69
        // As we're about to send a shutdown, we'll disable adds in the
×
NEW
70
        // outgoing direction.
×
NEW
71
        if err := chanState.DisableOutgoingAdds(); err != nil {
×
NEW
72
                return nil, fmt.Errorf("unable to disable outgoing "+
×
NEW
73
                        "adds: %w", err)
×
NEW
74
        }
×
75

76
        // To be able to survive a restart, we'll also write to disk
77
        // information about the shutdown we're about to send out.
NEW
78
        err := chanState.MarkShutdownSent(deliveryAddr, isInitiator)
×
NEW
79
        if err != nil {
×
NEW
80
                return nil, fmt.Errorf("unable to mark shutdown sent: %w", err)
×
NEW
81
        }
×
82

NEW
83
        chancloserLog.Debugf("ChannelPoint(%v): marking channel as borked",
×
NEW
84
                chanPoint)
×
NEW
85

×
NEW
86
        return protofsm.DaemonEventSet{msgsToSend}, nil
×
87
}
88

89
// validateShutdown is a helper function that validates that the shutdown has a
90
// proper delivery script, and can be sent based on the current thaw height of
91
// the channel.
92
func validateShutdown(chanThawHeight fn.Option[uint32],
93
        upfrontAddr fn.Option[lnwire.DeliveryAddress],
94
        msg *ShutdownReceived, chanPoint wire.OutPoint,
NEW
95
        chainParams chaincfg.Params) error {
×
NEW
96

×
NEW
97
        // If we've received a shutdown message, and we have a thaw height,
×
NEW
98
        // then we need to make sure that the channel can now be co-op closed.
×
NEW
99
        err := fn.MapOption(func(thawHeight uint32) error {
×
NEW
100
                // If the current height is below the thaw height, then we'll
×
NEW
101
                // reject the shutdown message as we can't yet co-op close the
×
NEW
102
                // channel.
×
NEW
103
                if msg.BlockHeight < thawHeight {
×
NEW
104
                        return fmt.Errorf("initiator attempting to "+
×
NEW
105
                                "co-op close frozen ChannelPoint(%v) "+
×
NEW
106
                                "(current_height=%v, thaw_height=%v)",
×
NEW
107
                                chanPoint, msg.BlockHeight,
×
NEW
108
                                thawHeight)
×
NEW
109
                }
×
110

NEW
111
                return nil
×
112
        })(chanThawHeight).UnwrapOr(nil)
NEW
113
        if err != nil {
×
NEW
114
                return err
×
NEW
115
        }
×
116

117
        // Next, we'll verify that the remote party is sending the expected
118
        // shutdown script.
NEW
119
        return fn.MapOption(func(addr lnwire.DeliveryAddress) error {
×
NEW
120
                return validateShutdownScript(
×
NEW
121
                        addr, msg.ShutdownScript, &chainParams,
×
NEW
122
                )
×
NEW
123
        })(upfrontAddr).UnwrapOr(nil)
×
124
}
125

126
// ProcessEvent takes a protocol event, and implements a state transition for
127
// the state. From this state, we can receive two possible incoming events:
128
// SendShutdown and ShutdownReceived. Both of these will transition us to the
129
// ChannelFlushing state.
130
func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment,
NEW
131
) (*CloseStateTransition, error) {
×
NEW
132

×
NEW
133
        switch msg := event.(type) {
×
134

135
        // If we get a confirmation, then a prior transaction we broadcasted
136
        // has confirmed, so we can move to our terminal state early.
NEW
137
        case *SpendEvent:
×
NEW
138
                return &CloseStateTransition{
×
NEW
139
                        NextState: &CloseFin{
×
NEW
140
                                transitionEvent: msg,
×
NEW
141
                                ConfirmedTx:     msg.Tx,
×
NEW
142
                        },
×
NEW
143
                }, nil
×
144

145
        // If we receive the SendShutdown event, then we'll send our shutdown
146
        // with a special SendPredicate, then go to the ShutdownPending where
147
        // we'll wait for the remote to send their shutdown.
NEW
148
        case *SendShutdown:
×
NEW
149
                // If we have an upfront shutdown addr or a delivery addr then
×
NEW
150
                // we'll use that. Otherwise, we'll generate a new delivery
×
NEW
151
                // addr.
×
NEW
152
                shutdownScript, err := env.LocalUpfrontShutdown.Alt(
×
NEW
153
                        msg.DeliveryAddr,
×
NEW
154
                ).UnwrapOrFuncErr(env.NewDeliveryScript)
×
NEW
155
                if err != nil {
×
NEW
156
                        return nil, err
×
NEW
157
                }
×
158

159
                // We'll emit some daemon events to send the shutdown message
160
                // and disable the channel on the network level. In this case,
161
                // we don't need a post send event as receive their shutdown is
162
                // what'll move us beyond the ShutdownPending state.
NEW
163
                daemonEvents, err := sendShutdownEvents(
×
NEW
164
                        env.ChanID, env.ChanPoint, shutdownScript,
×
NEW
165
                        env.ChanPeer, fn.None[ProtocolEvent](),
×
NEW
166
                        env.ChanObserver,
×
NEW
167
                )
×
NEW
168
                if err != nil {
×
NEW
169
                        return nil, err
×
NEW
170
                }
×
171

172
                // We'll also record that we arrived at the ShutdownPending
173
                // state via a SendShutdown event, which means this was a
174
                // locally initiated shutdown.
NEW
175
                shutdownTransition := fn.NewLeft[
×
NEW
176
                        SendShutdown, ShutdownReceived,
×
NEW
177
                ](*msg)
×
NEW
178

×
NEW
179
                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg, "+
×
NEW
180
                        "delivery_script=%v", env.ChanPoint, shutdownScript)
×
NEW
181

×
NEW
182
                // From here, we'll transition to the closing flushing state.
×
NEW
183
                // In this state we await their shutdown message (self loop),
×
NEW
184
                // then also the flushing event.
×
NEW
185
                return &CloseStateTransition{
×
NEW
186
                        NextState: &ShutdownPending{
×
NEW
187
                                prevState:    c,
×
NEW
188
                                inputEvents:  shutdownTransition,
×
NEW
189
                                IdealFeeRate: fn.Some(msg.IdealFeeRate),
×
NEW
190
                                ShutdownScripts: ShutdownScripts{
×
NEW
191
                                        LocalDeliveryScript: shutdownScript,
×
NEW
192
                                },
×
NEW
193
                        },
×
NEW
194
                        // TODO(roasbeef): type alias
×
NEW
195
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
×
NEW
196
                                ExternalEvents: fn.Some(daemonEvents),
×
NEW
197
                        }),
×
NEW
198
                }, nil
×
199

200
        // When we receive a shutdown from the remote party, we'll validate the
201
        // shutdown message, then transition to the ChannelFlushing state.
202
        // We'll also emit similar events like the above to send out shutdown,
203
        // and also disable the channel.
NEW
204
        case *ShutdownReceived:
×
NEW
205
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg")
×
NEW
206

×
NEW
207
                // Validate that they can send the message now, and also that
×
NEW
208
                // they haven't violated their commitment to a prior upfront
×
NEW
209
                // shutdown addr.
×
NEW
210
                err := validateShutdown(
×
NEW
211
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
×
NEW
212
                        env.ChanPoint, env.ChainParams,
×
NEW
213
                )
×
NEW
214
                if err != nil {
×
NEW
215
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
×
NEW
216
                                "shutdown attempt: %v", err)
×
NEW
217

×
NEW
218
                        // TODO(roasbeef): emit disconnect event?
×
NEW
219
                        return nil, err
×
NEW
220
                }
×
221

222
                // If we have an upfront shutdown addr we'll use that,
223
                // otherwise, we'll generate a new delivery script.
NEW
224
                shutdownAddr, err := env.LocalUpfrontShutdown.UnwrapOrFuncErr(
×
NEW
225
                        env.NewDeliveryScript,
×
NEW
226
                )
×
NEW
227
                if err != nil {
×
NEW
228
                        return nil, err
×
NEW
229
                }
×
230

NEW
231
                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg "+
×
NEW
232
                        "at next clean commit state", env.ChanPoint)
×
NEW
233

×
NEW
234
                // Now that we know the shutdown message is valid, we'll obtain
×
NEW
235
                // the set of daemon events we need to emit. We'll also specify
×
NEW
236
                // that once the message has actually been sent, that we
×
NEW
237
                // generate receive an input event of a ShutdownComplete.
×
NEW
238
                daemonEvents, err := sendShutdownEvents(
×
NEW
239
                        env.ChanID, env.ChanPoint, shutdownAddr,
×
NEW
240
                        env.ChanPeer,
×
NEW
241
                        fn.Some[ProtocolEvent](&ShutdownComplete{}),
×
NEW
242
                        env.ChanObserver,
×
NEW
243
                )
×
NEW
244
                if err != nil {
×
NEW
245
                        return nil, err
×
NEW
246
                }
×
247

NEW
248
                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
×
NEW
249
                        env.ChanPoint)
×
NEW
250

×
NEW
251
                // We just received a shutdown, so we'll disable the adds in
×
NEW
252
                // the outgoing direction.
×
NEW
253
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
×
NEW
254
                        return nil, fmt.Errorf("unable to disable incoming "+
×
NEW
255
                                "adds: %w", err)
×
NEW
256
                }
×
257

258
                // We'll also record that we arrived at the ChannelFlushing
259
                // state via a ShutdownReceived event, which means this was a
260
                // locally initiated shutdown.
NEW
261
                shutdownTransition := fn.NewRight[SendShutdown](*msg)
×
NEW
262

×
NEW
263
                return &CloseStateTransition{
×
NEW
264
                        NextState: &ShutdownPending{
×
NEW
265
                                prevState:   c,
×
NEW
266
                                inputEvents: shutdownTransition,
×
NEW
267
                                ShutdownScripts: ShutdownScripts{
×
NEW
268
                                        LocalDeliveryScript:  shutdownAddr,
×
NEW
269
                                        RemoteDeliveryScript: msg.ShutdownScript,
×
NEW
270
                                },
×
NEW
271
                        },
×
NEW
272
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
×
NEW
273
                                ExternalEvents: fn.Some(daemonEvents),
×
NEW
274
                        }),
×
NEW
275
                }, nil
×
276

277
        // Any other messages in this state will result in an error, as this is
278
        // an undefined state transition.
NEW
279
        default:
×
NEW
280
                return nil, fmt.Errorf("%w: received %T while in ChannelActive",
×
NEW
281
                        ErrInvalidStateTransition, msg)
×
282
        }
283
}
284

285
// ProcessEvent takes a protocol event, and implements a state transition for
286
// the state. Our path to this state will determine the set of valid events. If
287
// we were the one that sent the shutdown, then we'll just wait on the
288
// ShutdownReceived event. Otherwise, we received the shutdown, and can move
289
// forward once we recieve the ShutdownComplete event. Receiving
290
// ShutdownComplete means that we've sent our shutdown, as this was specified
291
// as a post send event.
292
func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment,
NEW
293
) (*CloseStateTransition, error) {
×
NEW
294

×
NEW
295
        switch msg := event.(type) {
×
296

297
        // If we get a confirmation, then a prior transaction we broadcasted
298
        // has confirmed, so we can move to our terminal state early.
NEW
299
        case *SpendEvent:
×
NEW
300
                // TODO(roasbeef): any other clean up events needed?
×
NEW
301
                return &CloseStateTransition{
×
NEW
302
                        NextState: &CloseFin{
×
NEW
303
                                transitionEvent: msg,
×
NEW
304
                                ConfirmedTx:     msg.Tx,
×
NEW
305
                        },
×
NEW
306
                }, nil
×
307

308
        // When we receive a shutdown from the remote party, we'll validate the
309
        // shutdown message, then transition to the ChannelFlushing state.
NEW
310
        case *ShutdownReceived:
×
NEW
311
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
×
NEW
312
                        env.ChanPoint)
×
NEW
313

×
NEW
314
                // Validate that they can send the message now, and also that
×
NEW
315
                // they haven't violated their commitment to a prior upfront
×
NEW
316
                // shutdown addr.
×
NEW
317
                err := validateShutdown(
×
NEW
318
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
×
NEW
319
                        env.ChanPoint, env.ChainParams,
×
NEW
320
                )
×
NEW
321
                if err != nil {
×
NEW
322
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
×
NEW
323
                                "shutdown attempt: %v", err)
×
NEW
324

×
NEW
325
                        return nil, err
×
NEW
326
                }
×
327

328
                // We'll also record that we arrived at the ChannelFlushing
329
                // state via a ShutdownReceived event, which means this was a
330
                // locally initiated shutdown.
NEW
331
                shutdownTransition := fn.NewRight[ShutdownComplete](*msg)
×
NEW
332

×
NEW
333
                // If the channel is *already* flushed, and the close is
×
NEW
334
                // already in progress, then we can skip the flushing state and
×
NEW
335
                // go straight into negotiation, as this is the RBF loop.
×
NEW
336
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
×
NEW
337
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
×
NEW
338
                        unknownBalance,
×
NEW
339
                )
×
NEW
340
                if finalBalances != unknownBalance {
×
NEW
341
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
×
NEW
342
                                ShutdownBalances: finalBalances,
×
NEW
343
                        })
×
NEW
344
                        eventsToEmit = fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
×
NEW
345
                                InternalEvent: fn.Some([]ProtocolEvent{channelFlushed}),
×
NEW
346
                        })
×
NEW
347
                }
×
348

NEW
349
                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
×
NEW
350
                        env.ChanPoint)
×
NEW
351

×
NEW
352
                // We just received a shutdown, so we'll disable the adds in
×
NEW
353
                // the outgoing direction.
×
NEW
354
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
×
NEW
355
                        return nil, fmt.Errorf("unable to disable incoming "+
×
NEW
356
                                "adds: %w", err)
×
NEW
357
                }
×
358

NEW
359
                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
×
NEW
360
                        "be flushed...", env.ChanPoint)
×
NEW
361

×
NEW
362
                // We transition to the ChannelFlushing state, where we await
×
NEW
363
                // the ChannelFlushed event.
×
NEW
364
                return &CloseStateTransition{
×
NEW
365
                        NextState: &ChannelFlushing{
×
NEW
366
                                inputEvents:  shutdownTransition,
×
NEW
367
                                prevState:    s,
×
NEW
368
                                IdealFeeRate: s.IdealFeeRate,
×
NEW
369
                                ShutdownScripts: ShutdownScripts{
×
NEW
370
                                        LocalDeliveryScript:  s.ShutdownScripts.LocalDeliveryScript, //nolint:lll
×
NEW
371
                                        RemoteDeliveryScript: msg.ShutdownScript,
×
NEW
372
                                },
×
NEW
373
                        },
×
NEW
374
                        NewEvents: eventsToEmit,
×
NEW
375
                }, nil
×
376

377
        // If we get this message, then this means that we were finally able to
378
        // send out shutdown after receiving it from the remote party. We'll
379
        // now transition directly to the ChannelFlushing state.
NEW
380
        case *ShutdownComplete:
×
NEW
381
                // We'll also record that we arrived at the ChannelFlushing
×
NEW
382
                // state via a ShutdownComplete event, which means this was a
×
NEW
383
                // locally initiated shutdown.
×
NEW
384
                shutdownTransition := fn.NewLeft[
×
NEW
385
                        ShutdownComplete, ShutdownReceived,
×
NEW
386
                ](*msg)
×
NEW
387

×
NEW
388
                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
×
NEW
389
                        "be flushed...", env.ChanPoint)
×
NEW
390

×
NEW
391
                // If the channel is *already* flushed, and the close is
×
NEW
392
                // already in progress, then we can skip the flushing state and
×
NEW
393
                // go straight into negotiation, as this is the RBF loop.
×
NEW
394
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
×
NEW
395
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
×
NEW
396
                        unknownBalance,
×
NEW
397
                )
×
NEW
398
                if finalBalances != unknownBalance {
×
NEW
399
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
×
NEW
400
                                ShutdownBalances: finalBalances,
×
NEW
401
                        })
×
NEW
402
                        eventsToEmit = fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
×
NEW
403
                                InternalEvent: fn.Some([]ProtocolEvent{
×
NEW
404
                                        channelFlushed,
×
NEW
405
                                }),
×
NEW
406
                        })
×
NEW
407
                }
×
408

409
                // From here, we'll transition to the channel flushing state.
410
                // We'll stay here until we receive the ChannelFlushed event.
NEW
411
                return &CloseStateTransition{
×
NEW
412
                        NextState: &ChannelFlushing{
×
NEW
413
                                prevState:       s,
×
NEW
414
                                inputEvents:     shutdownTransition,
×
NEW
415
                                IdealFeeRate:    s.IdealFeeRate,
×
NEW
416
                                ShutdownScripts: s.ShutdownScripts,
×
NEW
417
                        },
×
NEW
418
                        NewEvents: eventsToEmit,
×
NEW
419
                }, nil
×
420

421
        // Any other messages in this state will result in an error, as this is
422
        // an undefined state transition.
NEW
423
        default:
×
NEW
424
                return nil, fmt.Errorf("%w: received %T while in ShutdownPending",
×
NEW
425
                        ErrInvalidStateTransition, msg)
×
426
        }
427
}
428

429
// ProcessEvent takes a new protocol event, and figures out if we can
430
// transition to the next state, or just loop back upon ourself. If we receive
431
// a ShutdownReceived event, then we'll stay in the ChannelFlushing state, as
432
// we haven't yet fully cleared the channel. Otherwise, we can move to the
433
// CloseReady state which'll being the channel closing process.
434
func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment,
NEW
435
) (*CloseStateTransition, error) {
×
NEW
436

×
NEW
437
        switch msg := event.(type) {
×
438

439
        // If we get a confirmation, then a prior transaction we broadcasted
440
        // has confirmed, so we can move to our terminal state early.
NEW
441
        case *SpendEvent:
×
NEW
442
                return &CloseStateTransition{
×
NEW
443
                        NextState: &CloseFin{
×
NEW
444
                                transitionEvent: msg,
×
NEW
445
                                ConfirmedTx:     msg.Tx,
×
NEW
446
                        },
×
NEW
447
                }, nil
×
448

449
        // If we get an OfferReceived event, then the channel is flushed from
450
        // the PoV of the remote party. However, due to propagation delay or
451
        // concurrency, we may not have received the ChannelFlushed event yet.
452
        // In this case, we'll stash the event and wait for the ChannelFlushed
453
        // event.
NEW
454
        case *OfferReceivedEvent:
×
NEW
455
                chancloserLog.Infof("ChannelPoint(%v): received remote offer "+
×
NEW
456
                        "early, stashing...", env.ChanPoint)
×
NEW
457

×
NEW
458
                c.EarlyRemoteOffer = fn.Some(*msg)
×
NEW
459

×
NEW
460
                // TODO(roasbeef): unit test!
×
NEW
461

×
NEW
462
                // We'll perform a noop update so we can wait for the actual
×
NEW
463
                // channel flushed event.
×
NEW
464
                return &CloseStateTransition{
×
NEW
465
                        NextState: c,
×
NEW
466
                }, nil
×
467

468
        // If we receive the ChannelFlushed event, then the coast is clear so
469
        // we'll now morph into the dual peer state so we can handle any
470
        // messages needed to drive forward the close process.
NEW
471
        case *ChannelFlushed:
×
NEW
472
                // Both the local and remote losing negotiation needs the terms
×
NEW
473
                // we'll be using to close the channel, so we'll create them
×
NEW
474
                // here.
×
NEW
475
                closeTerms := CloseChannelTerms{
×
NEW
476
                        ShutdownScripts:  c.ShutdownScripts,
×
NEW
477
                        ShutdownBalances: msg.ShutdownBalances,
×
NEW
478
                }
×
NEW
479

×
NEW
480
                chancloserLog.Infof("ChannelPoint(%v): channel flushed! "+
×
NEW
481
                        "proceeding with co-op close", env.ChanPoint)
×
NEW
482

×
NEW
483
                // Now that the channel has been flushed, we'll mark on disk
×
NEW
484
                // that we're approaching the point of no return where we'll
×
NEW
485
                // send a new signature to the remote party.
×
NEW
486
                //
×
NEW
487
                // TODO(roasbeef): doesn't actually matter if initiator here?
×
NEW
488
                if msg.FreshFlush {
×
NEW
489
                        err := env.ChanObserver.MarkCoopBroadcasted(nil, true)
×
NEW
490
                        if err != nil {
×
NEW
491
                                return nil, err
×
NEW
492
                        }
×
493
                }
494

495
                // If an ideal fee rate was specified, then we'll use that,
496
                // otherwise we'll fall back to the default value given in the
497
                // env.
NEW
498
                idealFeeRate := c.IdealFeeRate.UnwrapOr(env.DefaultFeeRate)
×
NEW
499

×
NEW
500
                // We'll then use that fee rate to determine the absolute fee
×
NEW
501
                // we'd propose.
×
NEW
502
                //
×
NEW
503
                // TODO(roasbeef): need to sign the 3 diff versions of this?
×
NEW
504
                localTxOut, remoteTxOut := closeTerms.DeriveCloseTxOuts()
×
NEW
505
                absoluteFee := env.FeeEstimator.EstimateFee(
×
NEW
506
                        env.ChanType, localTxOut, remoteTxOut,
×
NEW
507
                        idealFeeRate.FeePerKWeight(),
×
NEW
508
                )
×
NEW
509

×
NEW
510
                chancloserLog.Infof("ChannelPoint(%v): using ideal_fee=%v, "+
×
NEW
511
                        "absolute_fee=%v", env.ChanPoint, idealFeeRate,
×
NEW
512
                        absoluteFee)
×
NEW
513

×
NEW
514
                var (
×
NEW
515
                        internalEvents []ProtocolEvent
×
NEW
516
                        newEvents      fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
×
NEW
517
                )
×
NEW
518

×
NEW
519
                // If we received a remote offer early from the remote party,
×
NEW
520
                // then we'll add that to the set of internal events to emit.
×
NEW
521
                c.EarlyRemoteOffer.WhenSome(func(offer OfferReceivedEvent) {
×
NEW
522
                        internalEvents = append(internalEvents, &offer)
×
NEW
523
                })
×
524

525
                // Only if we have enough funds to pay for the fees do we need
526
                // to emit a localOfferSign event.
527
                //
528
                // TODO(roasbeef): also only proceed if was higher than fee in
529
                // last round?
NEW
530
                if closeTerms.LocalCanPayFees(absoluteFee) {
×
NEW
531
                        // Each time we go into this negotiation flow, we'll
×
NEW
532
                        // kick off our local state with a new close attempt.
×
NEW
533
                        // So we'll emit a internal event to drive forward that
×
NEW
534
                        // part of the state.
×
NEW
535
                        localOfferSign := ProtocolEvent(&SendOfferEvent{
×
NEW
536
                                TargetFeeRate: idealFeeRate,
×
NEW
537
                        })
×
NEW
538
                        internalEvents = append(internalEvents, localOfferSign)
×
NEW
539
                } else {
×
NEW
540
                        chancloserLog.Infof("ChannelPoint(%v): unable to pay "+
×
NEW
541
                                "fees with local balance, skipping "+
×
NEW
542
                                "closing_complete", env.ChanPoint)
×
NEW
543
                }
×
544

NEW
545
                if len(internalEvents) > 0 {
×
NEW
546
                        newEvents = fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
×
NEW
547
                                InternalEvent: fn.Some(internalEvents),
×
NEW
548
                        })
×
NEW
549
                }
×
550

NEW
551
                return &CloseStateTransition{
×
NEW
552
                        NextState: &ClosingNegotiation{
×
NEW
553
                                PeerState: DualPeerState{
×
NEW
554
                                        LocalState: &LocalCloseStart{
×
NEW
555
                                                CloseChannelTerms: closeTerms,
×
NEW
556
                                        },
×
NEW
557
                                        RemoteState: &RemoteCloseStart{
×
NEW
558
                                                CloseChannelTerms: closeTerms,
×
NEW
559
                                        },
×
NEW
560
                                },
×
NEW
561
                        },
×
NEW
562
                        NewEvents: newEvents,
×
NEW
563
                }, nil
×
564

NEW
565
        default:
×
NEW
566
                return nil, fmt.Errorf("%w: received %T while in ChannelFlushing",
×
NEW
567
                        ErrInvalidStateTransition, msg)
×
568
        }
569
}
570

571
// ProcessEvent drives forward the composite states for the local and remote
// party in response to new events. From this state, we'll continue to drive
// forward the local and remote states until we arrive at the StateFin stage,
// or we loop back up to the ShutdownPending state.
func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	// There're two classes of events that can break us out of this state:
	// we receive a confirmation event, or we receive a signal to restart
	// the co-op close process.
	switch msg := event.(type) {
	// If we get a confirmation, then the spend request we issued when we
	// were leaving the ChannelFlushing state has been confirmed. We'll
	// now transition to the terminal CloseFin state, recording the
	// transaction that actually confirmed.
	case *SpendEvent:
		return &CloseStateTransition{
			NextState: &CloseFin{
				transitionEvent: msg,
				ConfirmedTx:     msg.Tx,
			},
		}, nil

	// Otherwise, if we receive a shutdown, or receive an event to send a
	// shutdown, then we'll go back up to the ChannelActive state, and have
	// it handle this event by emitting an internal event. The original
	// event is re-emitted so ChannelActive processes it on the next step.
	//
	// TODO(roasbeef): both will have fee rate specified, so ok?
	case *ShutdownReceived, *SendShutdown:
		chancloserLog.Infof("ChannelPoint(%v): RBF case triggered, "+
			"restarting negotiation", env.ChanPoint)

		return &CloseStateTransition{
			NextState: &ChannelActive{},
			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
				InternalEvent: fn.Some([]ProtocolEvent{event}),
			}),
		}, nil
	}

	// If we get to this point, then we have an event that'll drive forward
	// the negotiation process. Based on the event, we'll figure out which
	// state (local or remote) we'll be modifying; the other side's state
	// is carried over unchanged.
	switch {
	case c.PeerState.LocalState.ShouldRouteTo(event):
		chancloserLog.Infof("ChannelPoint(%v): routing %T to local "+
			"chan state", env.ChanPoint, event)

		// Drive forward the local state based on the next event.
		transition, err := c.PeerState.LocalState.ProcessEvent(
			event, env,
		)
		if err != nil {
			return nil, err
		}

		// The sub-state machine must hand back another per-peer
		// state; anything else indicates a programming error in the
		// sub-state's transition logic.
		nextLocalState, ok := transition.NextState.(AsymmetricPeerState)
		if !ok {
			return nil, fmt.Errorf("expected %T to be "+
				"AsymmetricPeerState", transition.NextState)
		}

		// Re-wrap into ClosingNegotiation with only the local half
		// advanced, forwarding any events the sub-state emitted.
		return &CloseStateTransition{
			NextState: &ClosingNegotiation{
				PeerState: DualPeerState{
					LocalState:  nextLocalState,
					RemoteState: c.PeerState.RemoteState,
				},
			},
			NewEvents: transition.NewEvents,
		}, nil

	case c.PeerState.RemoteState.ShouldRouteTo(event):
		chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+
			"chan state", env.ChanPoint, event)

		// Drive forward the remote state based on the next event.
		transition, err := c.PeerState.RemoteState.ProcessEvent(
			event, env,
		)
		if err != nil {
			return nil, err
		}

		// As above, the produced state must itself be a per-peer
		// state.
		nextRemoteState, ok := transition.NextState.(AsymmetricPeerState)
		if !ok {
			return nil, fmt.Errorf("expected %T to be "+
				"AsymmetricPeerState", transition.NextState)
		}

		// Re-wrap into ClosingNegotiation with only the remote half
		// advanced, forwarding any events the sub-state emitted.
		return &CloseStateTransition{
			NextState: &ClosingNegotiation{
				PeerState: DualPeerState{
					LocalState:  c.PeerState.LocalState,
					RemoteState: nextRemoteState,
				},
			},
			NewEvents: transition.NewEvents,
		}, nil
	}

	// Neither side knows how to handle this event from this state.
	return nil, fmt.Errorf("%w: received %T while in ClosingNegotiation",
		ErrInvalidStateTransition, event)
}
674

675
// newSigTlv is a helper function that returns a new optional TLV sig field for
676
// the parametrized tlv.TlvType value.
NEW
677
func newSigTlv[T tlv.TlvType](s lnwire.Sig) tlv.OptionalRecordT[T, lnwire.Sig] {
×
NEW
678
        return tlv.SomeRecordT(tlv.NewRecordT[T](s))
×
NEW
679
}
×
680

681
// ProcessEvent implements the event processing to kick off the process of
// obtaining a new (possibly RBF'd) signature for our commitment transaction.
func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) {
	// If we receive a SendOfferEvent, then we'll use the specified fee
	// rate to generate a signed offer for the closing transaction with
	// our ideal fee rate.
	case *SendOfferEvent:
		// First, we'll figure out the absolute fee we should pay
		// given the state of the local/remote outputs.
		localTxOut, remoteTxOut := l.DeriveCloseTxOuts()
		absoluteFee := env.FeeEstimator.EstimateFee(
			env.ChanType, localTxOut, remoteTxOut,
			msg.TargetFeeRate.FeePerKWeight(),
		)

		// Now that we know what fee we want to pay, we'll create a new
		// signature over our co-op close transaction. For our
		// proposals, we'll just always use the known RBF sequence
		// value, so later iterations can replace this transaction.
		localScript := l.CloseChannelTerms.LocalDeliveryScript
		rawSig, _, closeBalance, err := env.CloseSigner.CreateCloseProposal(
			absoluteFee, localScript,
			l.CloseChannelTerms.RemoteDeliveryScript,
			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
		)
		if err != nil {
			return nil, err
		}
		wireSig, err := lnwire.NewSigFromSignature(rawSig)
		if err != nil {
			return nil, err
		}

		chancloserLog.Infof("closing w/ local_addr=%x, "+
			"remote_addr=%x, fee=%v", localScript[:],
			l.CloseChannelTerms.RemoteDeliveryScript[:],
			absoluteFee)

		// Now that we have our signature, we'll set the proper
		// closingSigs field based on if the remote party's output is
		// dust or not. Exactly one of the three TLV fields is set.
		var closingSigs lnwire.ClosingSigs
		switch {
		// If the remote party's output is dust (DeriveCloseTxOuts
		// returned no remote output), then we'll set the
		// CloserNoClosee field.
		case remoteTxOut == nil:
			closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
				wireSig,
			)

		// If after paying for fees, our balance is below dust, then
		// we'll set the NoCloserClosee field.
		case closeBalance < lnwallet.DustLimitForSize(len(localScript)):
			closingSigs.NoCloserClosee = newSigTlv[tlv.TlvType2](
				wireSig,
			)

		// Otherwise, both outputs are economical and we'll set the
		// CloserAndClosee field.
		//
		// TODO(roasbeef): should actually set both??
		default:
			closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
				wireSig,
			)
		}

		// Now that we have our sig, we'll emit a daemon event to send
		// it to the remote party, then transition to the
		// LocalOfferSent state.
		//
		// TODO(roasbeef): type alias for protocol event
		sendEvent := protofsm.DaemonEventSet{&protofsm.SendMsgEvent[ProtocolEvent]{
			TargetPeer: env.ChanPeer,
			// TODO(roasbeef): mew new func
			Msgs: []lnwire.Message{&lnwire.ClosingComplete{
				ChannelID:   env.ChanID,
				FeeSatoshis: absoluteFee,
				Sequence:    mempool.MaxRBFSequence,
				ClosingSigs: closingSigs,
			}},
		}}

		chancloserLog.Infof("ChannelPoint(%v): sending closing sig "+
			"to remote party, fee_sats=%v", env.ChanPoint,
			absoluteFee)

		// Stash the prior state and the offer details so the next
		// state can validate the eventual ClosingSig response and
		// assemble the final transaction.
		return &CloseStateTransition{
			NextState: &LocalOfferSent{
				prevState:         l,
				transitionEvent:   msg,
				ProposedFee:       absoluteFee,
				LocalSig:          wireSig,
				CloseChannelTerms: l.CloseChannelTerms,
			},
			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
				ExternalEvents: fn.Some(sendEvent),
			}),
		}, nil
	}

	return nil, fmt.Errorf("%w: received %T while in LocalCloseStart",
		ErrInvalidStateTransition, event)
}
787

788
// extractSig extracts the expected signature from the closing sig message.
789
// Only one of them should actually be populated as the closing sig message is
790
// sent in response to a ClosingComplete message, it should only sign the same
791
// version of the co-op close tx as the sender did.
NEW
792
func extractSig(msg lnwire.ClosingSig) (*lnwire.Sig, error) {
×
NEW
793
        // First, we'll validate that only one signature is included in their
×
NEW
794
        // response to our initial offer. If not, then we'll exit here, and
×
NEW
795
        // trigger a recycle of the connection.
×
NEW
796
        var (
×
NEW
797
                numSigs  int
×
NEW
798
                sigBools = []bool{
×
NEW
799
                        msg.CloserNoClosee.IsSome(), msg.NoCloserClosee.IsSome(),
×
NEW
800
                        msg.CloserAndClosee.IsSome(),
×
NEW
801
                }
×
NEW
802
        )
×
NEW
803
        for _, b := range sigBools {
×
NEW
804
                if b {
×
NEW
805
                        numSigs += 1
×
NEW
806
                }
×
807
        }
NEW
808
        if numSigs != 1 {
×
NEW
809
                return nil, fmt.Errorf("% w- expected: 1, got: %v",
×
NEW
810
                        ErrTooManySigs, numSigs)
×
NEW
811
        }
×
812

NEW
813
        var sig *lnwire.Sig
×
NEW
814
        msg.CloserNoClosee.WhenSomeV(func(s lnwire.Sig) {
×
NEW
815
                sig = &s
×
NEW
816
        })
×
NEW
817
        msg.NoCloserClosee.WhenSomeV(func(s lnwire.Sig) {
×
NEW
818
                sig = &s
×
NEW
819
        })
×
NEW
820
        msg.CloserAndClosee.WhenSomeV(func(s lnwire.Sig) {
×
NEW
821
                sig = &s
×
NEW
822
        })
×
823

NEW
824
        return sig, nil
×
825
}
826

827
// ProcessEvent implements the state transition function for the
// LocalOfferSent state. In this state, we'll wait for the remote party to
// send a close_signed message which gives us the ability to broadcast a new
// co-op close transaction.
func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) {
	// If we receive a LocalSigReceived event, then we'll attempt to
	// validate the signature from the remote party. If valid, then we can
	// broadcast the transaction, and transition to the ClosePending state.
	case *LocalSigReceived:
		// Extract and validate that only one sig field is set.
		//
		// TODO(roasbeef): assert same one set based on type, will be
		// invalid otherwise anyway?
		sig, err := extractSig(msg.SigMsg)
		if err != nil {
			return nil, err
		}

		// Convert both the remote party's sig and the sig we stored
		// when making our offer into their full signature form.
		remoteSig, err := sig.ToSignature()
		if err != nil {
			return nil, err
		}
		localSig, err := l.LocalSig.ToSignature()
		if err != nil {
			return nil, err
		}

		// Now that we have their signature, we'll attempt to validate
		// it, then extract a valid closing signature from it. The RBF
		// sequence matches what we used in our offer.
		closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
			localSig, remoteSig,
			l.CloseChannelTerms.LocalDeliveryScript,
			l.CloseChannelTerms.RemoteDeliveryScript,
			l.ProposedFee,
			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
		)
		if err != nil {
			return nil, err
		}

		// As we're about to broadcast a new version of the co-op close
		// transaction, we'll mark again as broadcast, but with this
		// variant of the co-op close tx. The second argument
		// presumably marks us as the local initiator -- TODO confirm
		// against ChanStateObserver.
		//
		// TODO(roasbeef): db will only store one instance -- which is ok?
		err = env.ChanObserver.MarkCoopBroadcasted(closeTx, true)
		if err != nil {
			return nil, err
		}

		// Emit a daemon event to broadcast the finalized close
		// transaction, labeled so the wallet can associate it with
		// this channel close.
		broadcastEvent := protofsm.DaemonEventSet{&protofsm.BroadcastTxn{
			Tx: closeTx,
			Label: labels.MakeLabel(
				labels.LabelTypeChannelClose, &env.Scid,
			),
		}}

		// Record which of the two possible triggering events moved us
		// into ClosePending (here: the local-sig path).
		transitionEvent := fn.NewLeft[LocalSigReceived, OfferReceivedEvent](*msg)

		chancloserLog.Infof("ChannelPoint(%v): received sig from "+
			"remote party, broadcasting: tx=%v", env.ChanPoint,
			lnutils.SpewLogClosure(closeTx),
		)

		return &CloseStateTransition{
			NextState: &ClosePending{
				transitionEvents: transitionEvent,
				CloseTx:          closeTx,
			},
			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
				ExternalEvents: fn.Some(broadcastEvent),
			}),
		}, nil
	}

	return nil, fmt.Errorf("%w: received %T while in LocalOfferSent",
		ErrInvalidStateTransition, event)
}
908

909
// ProcessEvent implements the state transition function for the
910
// RemoteCloseStart. In this state, we'll wait for the remote party to send a
911
// closing_complete message. Assuming they can pay for the fees, we'll sign it
912
// ourselves, then transition to the next state of RemoteOfferReceived.
913
func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
NEW
914
) (*CloseStateTransition, error) {
×
NEW
915

×
NEW
916
        switch msg := event.(type) {
×
917
        // If we receive a OfferReceived event, we'll make sure they can
918
        // actually pay for the fee. If so, then we'll counter sign and
919
        // transition to a terminal state.
NEW
920
        case *OfferReceivedEvent:
×
NEW
921
                // To start, we'll perform some basic validation of the sig
×
NEW
922
                // message they've sent.
×
NEW
923
                switch {
×
924
                // We'll validate that the remote party actually has enough
925
                // fees to pay the closing fees.
NEW
926
                case !l.RemoteCanPayFees(msg.SigMsg.FeeSatoshis):
×
NEW
927
                        return nil, fmt.Errorf("%w: %v vs %v",
×
NEW
928
                                ErrRemoteCannotPay,
×
NEW
929
                                msg.SigMsg.FeeSatoshis,
×
NEW
930
                                l.RemoteBalance.ToSatoshis())
×
931

932
                // The sequence they send can't be the max sequence, as that would
933
                // prevent RBF.
NEW
934
                case msg.SigMsg.Sequence > mempool.MaxRBFSequence:
×
NEW
935
                        return nil, fmt.Errorf("%w: %v", ErrNonFinalSequence,
×
NEW
936
                                msg.SigMsg.Sequence)
×
937
                }
938

939
                // With the basic sanity checks out of the way, we'll now
940
                // figure out which signature that we'll attempt to sign
941
                // against.
NEW
942
                var (
×
NEW
943
                        remoteSig input.Signature
×
NEW
944
                        noClosee  bool
×
NEW
945
                )
×
NEW
946
                switch {
×
947
                // If our balance is dust, then we expect the CloserNoClosee
948
                // sig to be set.
NEW
949
                case l.LocalAmtIsDust():
×
NEW
950
                        if msg.SigMsg.CloserNoClosee.IsNone() {
×
NEW
951
                                return nil, ErrCloserNoClosee
×
NEW
952
                        }
×
NEW
953
                        msg.SigMsg.CloserNoClosee.WhenSomeV(func(s lnwire.Sig) {
×
NEW
954
                                remoteSig, _ = s.ToSignature()
×
NEW
955
                                noClosee = true
×
NEW
956
                        })
×
957

958
                // Otherwise, we'll assume that CloseAndClosee is set.
959
                //
960
                // TODO(roasbeef): NoCloserClosee, but makes no sense?
NEW
961
                default:
×
NEW
962
                        if msg.SigMsg.CloserAndClosee.IsNone() {
×
NEW
963
                                return nil, ErrCloserAndClosee
×
NEW
964
                        }
×
NEW
965
                        msg.SigMsg.CloserAndClosee.WhenSomeV(func(s lnwire.Sig) {
×
NEW
966
                                remoteSig, _ = s.ToSignature()
×
NEW
967
                        })
×
968
                }
969

NEW
970
                chanOpts := []lnwallet.ChanCloseOpt{
×
NEW
971
                        lnwallet.WithCustomSequence(msg.SigMsg.Sequence),
×
NEW
972
                }
×
NEW
973

×
NEW
974
                chancloserLog.Infof("responding to close w/ local_addr=%x, "+
×
NEW
975
                        "remote_addr=%x, fee=%v",
×
NEW
976
                        l.CloseChannelTerms.LocalDeliveryScript[:],
×
NEW
977
                        l.CloseChannelTerms.RemoteDeliveryScript[:],
×
NEW
978
                        msg.SigMsg.FeeSatoshis)
×
NEW
979

×
NEW
980
                // Now that we have the remote sig, we'll sign the version they
×
NEW
981
                // signed, then attempt to complete the cooperative close
×
NEW
982
                // process.
×
NEW
983
                //
×
NEW
984
                // TODO(roasbeef): need to be able to omit an output when
×
NEW
985
                // signing based on the above, as closing opt
×
NEW
986
                rawSig, _, _, err := env.CloseSigner.CreateCloseProposal(
×
NEW
987
                        msg.SigMsg.FeeSatoshis,
×
NEW
988
                        l.CloseChannelTerms.LocalDeliveryScript,
×
NEW
989
                        l.CloseChannelTerms.RemoteDeliveryScript,
×
NEW
990
                        chanOpts...,
×
NEW
991
                )
×
NEW
992
                if err != nil {
×
NEW
993
                        return nil, err
×
NEW
994
                }
×
NEW
995
                wireSig, err := lnwire.NewSigFromSignature(rawSig)
×
NEW
996
                if err != nil {
×
NEW
997
                        return nil, err
×
NEW
998
                }
×
999

NEW
1000
                localSig, err := wireSig.ToSignature()
×
NEW
1001
                if err != nil {
×
NEW
1002
                        return nil, err
×
NEW
1003
                }
×
1004

1005
                // With our signature created, we'll now attempt to finalize
1006
                // the close process.
1007
                //
1008
                // TODO(roasbef); duplication
NEW
1009
                closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
×
NEW
1010
                        localSig, remoteSig,
×
NEW
1011
                        l.CloseChannelTerms.LocalDeliveryScript,
×
NEW
1012
                        l.CloseChannelTerms.RemoteDeliveryScript,
×
NEW
1013
                        msg.SigMsg.FeeSatoshis, chanOpts...,
×
NEW
1014
                )
×
NEW
1015
                if err != nil {
×
NEW
1016
                        return nil, err
×
NEW
1017
                }
×
1018

NEW
1019
                chancloserLog.Infof("ChannelPoint(%v): received sig (fee=%v "+
×
NEW
1020
                        "sats) from remote party, signing new tx=%v",
×
NEW
1021
                        env.ChanPoint, msg.SigMsg.FeeSatoshis,
×
NEW
1022
                        lnutils.SpewLogClosure(closeTx),
×
NEW
1023
                )
×
NEW
1024

×
NEW
1025
                var closingSigs lnwire.ClosingSigs
×
NEW
1026
                if noClosee {
×
NEW
1027
                        closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](wireSig)
×
NEW
1028
                } else {
×
NEW
1029
                        closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](wireSig)
×
NEW
1030
                }
×
1031

1032
                // As we're about to broadcast a new version of the co-op close
1033
                // transaction, we'll mark again as broadcast, but with this
1034
                // variant of the co-op close tx.
1035
                //
1036
                // TODO(roasbeef): db will only store one instance, store both?
NEW
1037
                err = env.ChanObserver.MarkCoopBroadcasted(closeTx, false)
×
NEW
1038
                if err != nil {
×
NEW
1039
                        return nil, err
×
NEW
1040
                }
×
1041

1042
                // As we transition, we'll omit two events: one to broadcast
1043
                // the transaction, and the other to send our ClosingSig
1044
                // message to the remote party.
NEW
1045
                sendEvent := &protofsm.SendMsgEvent[ProtocolEvent]{
×
NEW
1046
                        TargetPeer: env.ChanPeer,
×
NEW
1047
                        Msgs: []lnwire.Message{&lnwire.ClosingSig{
×
NEW
1048
                                ChannelID:   env.ChanID,
×
NEW
1049
                                ClosingSigs: closingSigs,
×
NEW
1050
                        }},
×
NEW
1051
                }
×
NEW
1052
                broadcastEvent := &protofsm.BroadcastTxn{
×
NEW
1053
                        Tx: closeTx,
×
NEW
1054
                        Label: labels.MakeLabel(
×
NEW
1055
                                labels.LabelTypeChannelClose, &env.Scid,
×
NEW
1056
                        ),
×
NEW
1057
                }
×
NEW
1058
                daemonEvents := protofsm.DaemonEventSet{sendEvent, broadcastEvent}
×
NEW
1059

×
NEW
1060
                // Now that we've extracted the signature, we'll transition to
×
NEW
1061
                // the next state where we'll sign+broadcast the sig.
×
NEW
1062
                return &CloseStateTransition{
×
NEW
1063
                        NextState: &ClosePending{
×
NEW
1064
                                CloseTx: closeTx,
×
NEW
1065
                        },
×
NEW
1066
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
×
NEW
1067
                                ExternalEvents: fn.Some(daemonEvents),
×
NEW
1068
                        }),
×
NEW
1069
                }, nil
×
1070
        }
1071

NEW
1072
        return nil, fmt.Errorf("%w: received %T while in RemoteCloseStart",
×
NEW
1073
                ErrInvalidStateTransition, event)
×
1074
}
1075

1076
// ProcessEvent is a semi-terminal state in the rbf-coop close state machine.
1077
// In this state, we're waiting for either a confirmation, or for either side
1078
// to attempt to create a new RBF'd co-op close transaction.
1079
func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment,
NEW
1080
) (*CloseStateTransition, error) {
×
NEW
1081

×
NEW
1082
        switch msg := event.(type) {
×
1083
        // If we can a spend while waiting for the close, then we'll go to our
1084
        // terminal state.
NEW
1085
        case *SpendEvent:
×
NEW
1086
                return &CloseStateTransition{
×
NEW
1087
                        NextState: &CloseFin{
×
NEW
1088
                                transitionEvent: msg,
×
NEW
1089
                                ConfirmedTx:     msg.Tx,
×
NEW
1090
                        },
×
NEW
1091
                }, nil
×
1092

NEW
1093
        default:
×
NEW
1094

×
NEW
1095
                return &CloseStateTransition{
×
NEW
1096
                        NextState: c,
×
NEW
1097
                }, nil
×
1098
        }
1099
}
1100

1101
// ProcessEvent is the event processing for out terminal state. In this state,
1102
// we just keep looping back on ourselves.
1103
func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment,
1104
) (*CloseStateTransition, error) {
×
1105

×
1106
        return &CloseStateTransition{
×
1107
                NextState: c,
×
1108
        }, nil
×
1109
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc