• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 13725358077

07 Mar 2025 04:51PM UTC coverage: 58.224% (-10.4%) from 68.615%
13725358077

Pull #9458

github

web-flow
Merge bf4c6625f into ab2dc09eb
Pull Request #9458: multi+server.go: add initial permissions for some peers

346 of 549 new or added lines in 10 files covered. (63.02%)

27466 existing lines in 443 files now uncovered.

94609 of 162492 relevant lines covered (58.22%)

1.81 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/lnwallet/chancloser/rbf_coop_transitions.go
1
package chancloser
2

3
import (
4
        "fmt"
5

6
        "github.com/btcsuite/btcd/btcec/v2"
7
        "github.com/btcsuite/btcd/chaincfg"
8
        "github.com/btcsuite/btcd/mempool"
9
        "github.com/btcsuite/btcd/wire"
10
        "github.com/davecgh/go-spew/spew"
11
        "github.com/lightningnetwork/lnd/fn/v2"
12
        "github.com/lightningnetwork/lnd/input"
13
        "github.com/lightningnetwork/lnd/labels"
14
        "github.com/lightningnetwork/lnd/lntypes"
15
        "github.com/lightningnetwork/lnd/lnutils"
16
        "github.com/lightningnetwork/lnd/lnwallet"
17
        "github.com/lightningnetwork/lnd/lnwire"
18
        "github.com/lightningnetwork/lnd/protofsm"
19
        "github.com/lightningnetwork/lnd/tlv"
20
)
21

22
// sendShutdownEvents is a helper function that returns a set of daemon events
23
// we need to emit when we decide that we should send a shutdown message. We'll
24
// also mark the channel as borked as well, as at this point, we no longer want
25
// to continue with normal operation.
26
func sendShutdownEvents(chanID lnwire.ChannelID, chanPoint wire.OutPoint,
27
        deliveryAddr lnwire.DeliveryAddress, peerPub btcec.PublicKey,
28
        postSendEvent fn.Option[ProtocolEvent],
UNCOV
29
        chanState ChanStateObserver) (protofsm.DaemonEventSet, error) {
×
UNCOV
30

×
UNCOV
31
        // We'll emit a daemon event that instructs the daemon to send out a
×
UNCOV
32
        // new shutdown message to the remote peer.
×
UNCOV
33
        msgsToSend := &protofsm.SendMsgEvent[ProtocolEvent]{
×
UNCOV
34
                TargetPeer: peerPub,
×
UNCOV
35
                Msgs: []lnwire.Message{&lnwire.Shutdown{
×
UNCOV
36
                        ChannelID: chanID,
×
UNCOV
37
                        Address:   deliveryAddr,
×
UNCOV
38
                }},
×
UNCOV
39
                SendWhen: fn.Some(func() bool {
×
UNCOV
40
                        ok := chanState.NoDanglingUpdates()
×
UNCOV
41
                        if ok {
×
UNCOV
42
                                chancloserLog.Infof("ChannelPoint(%v): no "+
×
UNCOV
43
                                        "dangling updates sending shutdown "+
×
UNCOV
44
                                        "message", chanPoint)
×
UNCOV
45
                        }
×
46

UNCOV
47
                        return ok
×
48
                }),
49
                PostSendEvent: postSendEvent,
50
        }
51

52
        // If a close is already in process (we're in the RBF loop), then we
53
        // can skip everything below, and just send out the shutdown message.
UNCOV
54
        if chanState.FinalBalances().IsSome() {
×
UNCOV
55
                return protofsm.DaemonEventSet{msgsToSend}, nil
×
UNCOV
56
        }
×
57

58
        // Before closing, we'll attempt to send a disable update for the
59
        // channel.  We do so before closing the channel as otherwise the
60
        // current edge policy won't be retrievable from the graph.
UNCOV
61
        if err := chanState.DisableChannel(); err != nil {
×
62
                return nil, fmt.Errorf("unable to disable channel: %w", err)
×
63
        }
×
64

65
        // If we have a post-send event, then this means that we're the
66
        // responder. We'll use this fact below to update state in the DB.
UNCOV
67
        isInitiator := postSendEvent.IsNone()
×
UNCOV
68

×
UNCOV
69
        chancloserLog.Infof("ChannelPoint(%v): disabling outgoing adds",
×
UNCOV
70
                chanPoint)
×
UNCOV
71

×
UNCOV
72
        // As we're about to send a shutdown, we'll disable adds in the
×
UNCOV
73
        // outgoing direction.
×
UNCOV
74
        if err := chanState.DisableOutgoingAdds(); err != nil {
×
75
                return nil, fmt.Errorf("unable to disable outgoing "+
×
76
                        "adds: %w", err)
×
77
        }
×
78

79
        // To be able to survive a restart, we'll also write to disk
80
        // information about the shutdown we're about to send out.
UNCOV
81
        err := chanState.MarkShutdownSent(deliveryAddr, isInitiator)
×
UNCOV
82
        if err != nil {
×
83
                return nil, fmt.Errorf("unable to mark shutdown sent: %w", err)
×
84
        }
×
85

UNCOV
86
        chancloserLog.Debugf("ChannelPoint(%v): marking channel as borked",
×
UNCOV
87
                chanPoint)
×
UNCOV
88

×
UNCOV
89
        return protofsm.DaemonEventSet{msgsToSend}, nil
×
90
}
91

92
// validateShutdown is a helper function that validates that the shutdown has a
93
// proper delivery script, and can be sent based on the current thaw height of
94
// the channel.
95
func validateShutdown(chanThawHeight fn.Option[uint32],
96
        upfrontAddr fn.Option[lnwire.DeliveryAddress],
97
        msg *ShutdownReceived, chanPoint wire.OutPoint,
UNCOV
98
        chainParams chaincfg.Params) error {
×
UNCOV
99

×
UNCOV
100
        // If we've received a shutdown message, and we have a thaw height,
×
UNCOV
101
        // then we need to make sure that the channel can now be co-op closed.
×
UNCOV
102
        err := fn.MapOptionZ(chanThawHeight, func(thawHeight uint32) error {
×
103
                // If the current height is below the thaw height, then we'll
×
104
                // reject the shutdown message as we can't yet co-op close the
×
105
                // channel.
×
106
                if msg.BlockHeight < thawHeight {
×
107
                        return fmt.Errorf("initiator attempting to "+
×
108
                                "co-op close frozen ChannelPoint(%v) "+
×
109
                                "(current_height=%v, thaw_height=%v)",
×
110
                                chanPoint, msg.BlockHeight,
×
111
                                thawHeight)
×
112
                }
×
113

114
                return nil
×
115
        })
UNCOV
116
        if err != nil {
×
117
                return err
×
118
        }
×
119

120
        // Next, we'll verify that the remote party is sending the expected
121
        // shutdown script.
UNCOV
122
        return fn.MapOption(func(addr lnwire.DeliveryAddress) error {
×
UNCOV
123
                return validateShutdownScript(
×
UNCOV
124
                        addr, msg.ShutdownScript, &chainParams,
×
UNCOV
125
                )
×
UNCOV
126
        })(upfrontAddr).UnwrapOr(nil)
×
127
}
128

129
// ProcessEvent takes a protocol event, and implements a state transition for
130
// the state. From this state, we can receive two possible incoming events:
131
// SendShutdown and ShutdownReceived. Both of these will transition us to the
132
// ChannelFlushing state.
133
func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment,
UNCOV
134
) (*CloseStateTransition, error) {
×
UNCOV
135

×
UNCOV
136
        switch msg := event.(type) {
×
137
        // If we get a confirmation, then a prior transaction we broadcasted
138
        // has confirmed, so we can move to our terminal state early.
UNCOV
139
        case *SpendEvent:
×
UNCOV
140
                return &CloseStateTransition{
×
UNCOV
141
                        NextState: &CloseFin{
×
UNCOV
142
                                ConfirmedTx: msg.Tx,
×
UNCOV
143
                        },
×
UNCOV
144
                }, nil
×
145

146
        // If we receive the SendShutdown event, then we'll send our shutdown
147
        // with a special SendPredicate, then go to the ShutdownPending where
148
        // we'll wait for the remote to send their shutdown.
UNCOV
149
        case *SendShutdown:
×
UNCOV
150
                // If we have an upfront shutdown addr or a delivery addr then
×
UNCOV
151
                // we'll use that. Otherwise, we'll generate a new delivery
×
UNCOV
152
                // addr.
×
UNCOV
153
                shutdownScript, err := env.LocalUpfrontShutdown.Alt(
×
UNCOV
154
                        msg.DeliveryAddr,
×
UNCOV
155
                ).UnwrapOrFuncErr(env.NewDeliveryScript)
×
UNCOV
156
                if err != nil {
×
UNCOV
157
                        return nil, err
×
UNCOV
158
                }
×
159

160
                // We'll emit some daemon events to send the shutdown message
161
                // and disable the channel on the network level. In this case,
162
                // we don't need a post send event as receive their shutdown is
163
                // what'll move us beyond the ShutdownPending state.
UNCOV
164
                daemonEvents, err := sendShutdownEvents(
×
UNCOV
165
                        env.ChanID, env.ChanPoint, shutdownScript,
×
UNCOV
166
                        env.ChanPeer, fn.None[ProtocolEvent](),
×
UNCOV
167
                        env.ChanObserver,
×
UNCOV
168
                )
×
UNCOV
169
                if err != nil {
×
170
                        return nil, err
×
171
                }
×
172

UNCOV
173
                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg, "+
×
UNCOV
174
                        "delivery_script=%v", env.ChanPoint, shutdownScript)
×
UNCOV
175

×
UNCOV
176
                // From here, we'll transition to the shutdown pending state. In
×
UNCOV
177
                // this state we await their shutdown message (self loop), then
×
UNCOV
178
                // also the flushing event.
×
UNCOV
179
                return &CloseStateTransition{
×
UNCOV
180
                        NextState: &ShutdownPending{
×
UNCOV
181
                                IdealFeeRate: fn.Some(msg.IdealFeeRate),
×
UNCOV
182
                                ShutdownScripts: ShutdownScripts{
×
UNCOV
183
                                        LocalDeliveryScript: shutdownScript,
×
UNCOV
184
                                },
×
UNCOV
185
                        },
×
UNCOV
186
                        NewEvents: fn.Some(RbfEvent{
×
UNCOV
187
                                ExternalEvents: daemonEvents,
×
UNCOV
188
                        }),
×
UNCOV
189
                }, nil
×
190

191
        // When we receive a shutdown from the remote party, we'll validate the
192
        // shutdown message, then transition to the ShutdownPending state. We'll
193
        // also emit similar events like the above to send out shutdown, and
194
        // also disable the channel.
UNCOV
195
        case *ShutdownReceived:
×
UNCOV
196
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg")
×
UNCOV
197

×
UNCOV
198
                // Validate that they can send the message now, and also that
×
UNCOV
199
                // they haven't violated their commitment to a prior upfront
×
UNCOV
200
                // shutdown addr.
×
UNCOV
201
                err := validateShutdown(
×
UNCOV
202
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
×
UNCOV
203
                        env.ChanPoint, env.ChainParams,
×
UNCOV
204
                )
×
UNCOV
205
                if err != nil {
×
206
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
×
207
                                "shutdown attempt: %v", err)
×
208

×
209
                        return nil, err
×
210
                }
×
211

212
                // If we have an upfront shutdown addr we'll use that,
213
                // otherwise, we'll generate a new delivery script.
UNCOV
214
                shutdownAddr, err := env.LocalUpfrontShutdown.UnwrapOrFuncErr(
×
UNCOV
215
                        env.NewDeliveryScript,
×
UNCOV
216
                )
×
UNCOV
217
                if err != nil {
×
218
                        return nil, err
×
219
                }
×
220

UNCOV
221
                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg "+
×
UNCOV
222
                        "at next clean commit state", env.ChanPoint)
×
UNCOV
223

×
UNCOV
224
                // Now that we know the shutdown message is valid, we'll obtain
×
UNCOV
225
                // the set of daemon events we need to emit. We'll also specify
×
UNCOV
226
                // that once the message has actually been sent, that we
×
UNCOV
227
                // generate receive an input event of a ShutdownComplete.
×
UNCOV
228
                daemonEvents, err := sendShutdownEvents(
×
UNCOV
229
                        env.ChanID, env.ChanPoint, shutdownAddr,
×
UNCOV
230
                        env.ChanPeer,
×
UNCOV
231
                        fn.Some[ProtocolEvent](&ShutdownComplete{}),
×
UNCOV
232
                        env.ChanObserver,
×
UNCOV
233
                )
×
UNCOV
234
                if err != nil {
×
235
                        return nil, err
×
236
                }
×
237

UNCOV
238
                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
×
UNCOV
239
                        env.ChanPoint)
×
UNCOV
240

×
UNCOV
241
                // We just received a shutdown, so we'll disable the adds in
×
UNCOV
242
                // the outgoing direction.
×
UNCOV
243
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
×
244
                        return nil, fmt.Errorf("unable to disable incoming "+
×
245
                                "adds: %w", err)
×
246
                }
×
247

UNCOV
248
                remoteAddr := msg.ShutdownScript
×
UNCOV
249

×
UNCOV
250
                return &CloseStateTransition{
×
UNCOV
251
                        NextState: &ShutdownPending{
×
UNCOV
252
                                ShutdownScripts: ShutdownScripts{
×
UNCOV
253
                                        LocalDeliveryScript:  shutdownAddr,
×
UNCOV
254
                                        RemoteDeliveryScript: remoteAddr,
×
UNCOV
255
                                },
×
UNCOV
256
                        },
×
UNCOV
257
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
×
UNCOV
258
                                ExternalEvents: daemonEvents,
×
UNCOV
259
                        }),
×
UNCOV
260
                }, nil
×
261

262
        // Any other messages in this state will result in an error, as this is
263
        // an undefined state transition.
UNCOV
264
        default:
×
UNCOV
265
                return nil, fmt.Errorf("%w: received %T while in ChannelActive",
×
UNCOV
266
                        ErrInvalidStateTransition, msg)
×
267
        }
268
}
269

270
// ProcessEvent takes a protocol event, and implements a state transition for
271
// the state. Our path to this state will determine the set of valid events. If
272
// we were the one that sent the shutdown, then we'll just wait on the
273
// ShutdownReceived event. Otherwise, we received the shutdown, and can move
274
// forward once we receive the ShutdownComplete event. Receiving
275
// ShutdownComplete means that we've sent our shutdown, as this was specified
276
// as a post send event.
277
func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment,
UNCOV
278
) (*CloseStateTransition, error) {
×
UNCOV
279

×
UNCOV
280
        switch msg := event.(type) {
×
281
        // If we get a confirmation, then a prior transaction we broadcasted
282
        // has confirmed, so we can move to our terminal state early.
UNCOV
283
        case *SpendEvent:
×
UNCOV
284
                return &CloseStateTransition{
×
UNCOV
285
                        NextState: &CloseFin{
×
UNCOV
286
                                ConfirmedTx: msg.Tx,
×
UNCOV
287
                        },
×
UNCOV
288
                }, nil
×
289

290
        // When we receive a shutdown from the remote party, we'll validate the
291
        // shutdown message, then transition to the ChannelFlushing state.
UNCOV
292
        case *ShutdownReceived:
×
UNCOV
293
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
×
UNCOV
294
                        env.ChanPoint)
×
UNCOV
295

×
UNCOV
296
                // Validate that they can send the message now, and also that
×
UNCOV
297
                // they haven't violated their commitment to a prior upfront
×
UNCOV
298
                // shutdown addr.
×
UNCOV
299
                err := validateShutdown(
×
UNCOV
300
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
×
UNCOV
301
                        env.ChanPoint, env.ChainParams,
×
UNCOV
302
                )
×
UNCOV
303
                if err != nil {
×
UNCOV
304
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
×
UNCOV
305
                                "shutdown attempt: %v", err)
×
UNCOV
306

×
UNCOV
307
                        return nil, err
×
UNCOV
308
                }
×
309

310
                // If the channel is *already* flushed, and the close is
311
                // go straight into negotiation, as this is the RBF loop.
312
                // already in progress, then we can skip the flushing state and
UNCOV
313
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
×
UNCOV
314
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
×
UNCOV
315
                        unknownBalance,
×
UNCOV
316
                )
×
UNCOV
317
                if finalBalances != unknownBalance {
×
UNCOV
318
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
×
UNCOV
319
                                ShutdownBalances: finalBalances,
×
UNCOV
320
                        })
×
UNCOV
321
                        eventsToEmit = fn.Some(RbfEvent{
×
UNCOV
322
                                InternalEvent: []ProtocolEvent{
×
UNCOV
323
                                        channelFlushed,
×
UNCOV
324
                                },
×
UNCOV
325
                        })
×
UNCOV
326
                }
×
327

UNCOV
328
                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
×
UNCOV
329
                        env.ChanPoint)
×
UNCOV
330

×
UNCOV
331
                // We just received a shutdown, so we'll disable the adds in
×
UNCOV
332
                // the outgoing direction.
×
UNCOV
333
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
×
334
                        return nil, fmt.Errorf("unable to disable incoming "+
×
335
                                "adds: %w", err)
×
336
                }
×
337

UNCOV
338
                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
×
UNCOV
339
                        "be flushed...", env.ChanPoint)
×
UNCOV
340

×
UNCOV
341
                // We transition to the ChannelFlushing state, where we await
×
UNCOV
342
                // the ChannelFlushed event.
×
UNCOV
343
                return &CloseStateTransition{
×
UNCOV
344
                        NextState: &ChannelFlushing{
×
UNCOV
345
                                IdealFeeRate: s.IdealFeeRate,
×
UNCOV
346
                                ShutdownScripts: ShutdownScripts{
×
UNCOV
347
                                        LocalDeliveryScript:  s.LocalDeliveryScript, //nolint:ll
×
UNCOV
348
                                        RemoteDeliveryScript: msg.ShutdownScript,    //nolint:ll
×
UNCOV
349
                                },
×
UNCOV
350
                        },
×
UNCOV
351
                        NewEvents: eventsToEmit,
×
UNCOV
352
                }, nil
×
353

354
        // If we get this message, then this means that we were finally able to
355
        // send out shutdown after receiving it from the remote party. We'll
356
        // now transition directly to the ChannelFlushing state.
UNCOV
357
        case *ShutdownComplete:
×
UNCOV
358
                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
×
UNCOV
359
                        "be flushed...", env.ChanPoint)
×
UNCOV
360

×
UNCOV
361
                // If the channel is *already* flushed, and the close is
×
UNCOV
362
                // already in progress, then we can skip the flushing state and
×
UNCOV
363
                // go straight into negotiation, as this is the RBF loop.
×
UNCOV
364
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
×
UNCOV
365
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
×
UNCOV
366
                        unknownBalance,
×
UNCOV
367
                )
×
UNCOV
368
                if finalBalances != unknownBalance {
×
UNCOV
369
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
×
UNCOV
370
                                ShutdownBalances: finalBalances,
×
UNCOV
371
                        })
×
UNCOV
372
                        eventsToEmit = fn.Some(RbfEvent{
×
UNCOV
373
                                InternalEvent: []ProtocolEvent{
×
UNCOV
374
                                        channelFlushed,
×
UNCOV
375
                                },
×
UNCOV
376
                        })
×
UNCOV
377
                }
×
378

379
                // From here, we'll transition to the channel flushing state.
380
                // We'll stay here until we receive the ChannelFlushed event.
UNCOV
381
                return &CloseStateTransition{
×
UNCOV
382
                        NextState: &ChannelFlushing{
×
UNCOV
383
                                IdealFeeRate:    s.IdealFeeRate,
×
UNCOV
384
                                ShutdownScripts: s.ShutdownScripts,
×
UNCOV
385
                        },
×
UNCOV
386
                        NewEvents: eventsToEmit,
×
UNCOV
387
                }, nil
×
388

389
        // Any other messages in this state will result in an error, as this is
390
        // an undefined state transition.
UNCOV
391
        default:
×
UNCOV
392
                return nil, fmt.Errorf("%w: received %T while in "+
×
UNCOV
393
                        "ShutdownPending", ErrInvalidStateTransition, msg)
×
394
        }
395
}
396

397
// ProcessEvent takes a new protocol event, and figures out if we can
// transition to the next state, or just loop back upon ourself. If we receive
// a ShutdownReceived event, then we'll stay in the ChannelFlushing state, as
// we haven't yet fully cleared the channel. Otherwise, we can move to the
// CloseReady state which'll begin the channel closing process.
func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) {
	// If we get a confirmation, then a prior transaction we broadcasted
	// has confirmed, so we can move to our terminal state early.
	case *SpendEvent:
		return &CloseStateTransition{
			NextState: &CloseFin{
				ConfirmedTx: msg.Tx,
			},
		}, nil

	// If we get an OfferReceived event, then the channel is flushed from
	// the PoV of the remote party. However, due to propagation delay or
	// concurrency, we may not have received the ChannelFlushed event yet.
	// In this case, we'll stash the event and wait for the ChannelFlushed
	// event.
	case *OfferReceivedEvent:
		chancloserLog.Infof("ChannelPoint(%v): received remote offer "+
			"early, stashing...", env.ChanPoint)

		// Stash the offer so it can be replayed as an internal event
		// once ChannelFlushed arrives below.
		c.EarlyRemoteOffer = fn.Some(*msg)

		// TODO(roasbeef): unit test!
		//  * actually do this ^

		// We'll perform a noop update so we can wait for the actual
		// channel flushed event.
		return &CloseStateTransition{
			NextState: c,
		}, nil

	// If we receive the ChannelFlushed event, then the coast is clear so
	// we'll now morph into the dual peer state so we can handle any
	// messages needed to drive forward the close process.
	case *ChannelFlushed:
		// Both the local and remote closing negotiation need the
		// terms we'll be using to close the channel, so we'll create
		// them here.
		closeTerms := CloseChannelTerms{
			ShutdownScripts:  c.ShutdownScripts,
			ShutdownBalances: msg.ShutdownBalances,
		}

		chancloserLog.Infof("ChannelPoint(%v): channel flushed! "+
			"proceeding with co-op close", env.ChanPoint)

		// Now that the channel has been flushed, we'll mark on disk
		// that we're approaching the point of no return where we'll
		// send a new signature to the remote party. Only done on a
		// fresh flush, not when re-entering via the RBF loop.
		//
		// TODO(roasbeef): doesn't actually matter if initiator here?
		if msg.FreshFlush {
			err := env.ChanObserver.MarkCoopBroadcasted(nil, true)
			if err != nil {
				return nil, err
			}
		}

		// If an ideal fee rate was specified, then we'll use that,
		// otherwise we'll fall back to the default value given in the
		// env.
		idealFeeRate := c.IdealFeeRate.UnwrapOr(env.DefaultFeeRate)

		// We'll then use that fee rate to determine the absolute fee
		// we'd propose.
		//
		// TODO(roasbeef): need to sign the 3 diff versions of this?
		localTxOut, remoteTxOut := closeTerms.DeriveCloseTxOuts()
		absoluteFee := env.FeeEstimator.EstimateFee(
			env.ChanType, localTxOut, remoteTxOut,
			idealFeeRate.FeePerKWeight(),
		)

		chancloserLog.Infof("ChannelPoint(%v): using ideal_fee=%v, "+
			"absolute_fee=%v", env.ChanPoint, idealFeeRate,
			absoluteFee)

		var (
			internalEvents []ProtocolEvent
			newEvents      fn.Option[RbfEvent]
		)

		// If we received a remote offer early from the remote party,
		// then we'll add that to the set of internal events to emit.
		c.EarlyRemoteOffer.WhenSome(func(offer OfferReceivedEvent) {
			internalEvents = append(internalEvents, &offer)
		})

		// Only if we have enough funds to pay for the fees do we need
		// to emit a localOfferSign event.
		//
		// TODO(roasbeef): also only proceed if was higher than fee in
		// last round?
		if closeTerms.LocalCanPayFees(absoluteFee) {
			// Each time we go into this negotiation flow, we'll
			// kick off our local state with a new close attempt.
			// So we'll emit a internal event to drive forward that
			// part of the state.
			localOfferSign := ProtocolEvent(&SendOfferEvent{
				TargetFeeRate: idealFeeRate,
			})
			internalEvents = append(internalEvents, localOfferSign)
		} else {
			chancloserLog.Infof("ChannelPoint(%v): unable to pay "+
				"fees with local balance, skipping "+
				"closing_complete", env.ChanPoint)
		}

		if len(internalEvents) > 0 {
			newEvents = fn.Some(RbfEvent{
				InternalEvent: internalEvents,
			})
		}

		// Transition to the composite negotiation state: the local
		// and remote parties each get their own sub-state seeded with
		// the same close terms.
		return &CloseStateTransition{
			NextState: &ClosingNegotiation{
				PeerState: lntypes.Dual[AsymmetricPeerState]{
					Local: &LocalCloseStart{
						CloseChannelTerms: closeTerms,
					},
					Remote: &RemoteCloseStart{
						CloseChannelTerms: closeTerms,
					},
				},
			},
			NewEvents: newEvents,
		}, nil

	// Any other messages in this state will result in an error, as this is
	// an undefined state transition.
	default:
		return nil, fmt.Errorf("%w: received %T while in "+
			"ChannelFlushing", ErrInvalidStateTransition, msg)
	}
}
537

538
// processNegotiateEvent is a helper function that processes a new event to
539
// local channel state once we're in the ClosingNegotiation state.
540
func processNegotiateEvent(c *ClosingNegotiation, event ProtocolEvent,
541
        env *Environment, chanPeer lntypes.ChannelParty,
UNCOV
542
) (*CloseStateTransition, error) {
×
UNCOV
543

×
UNCOV
544
        targetPeerState := c.PeerState.GetForParty(chanPeer)
×
UNCOV
545

×
UNCOV
546
        // Drive forward the remote state based on the next event.
×
UNCOV
547
        transition, err := targetPeerState.ProcessEvent(
×
UNCOV
548
                event, env,
×
UNCOV
549
        )
×
UNCOV
550
        if err != nil {
×
UNCOV
551
                return nil, err
×
UNCOV
552
        }
×
553

UNCOV
554
        nextPeerState, ok := transition.NextState.(AsymmetricPeerState) //nolint:ll
×
UNCOV
555
        if !ok {
×
556
                return nil, fmt.Errorf("expected %T to be "+
×
557
                        "AsymmetricPeerState", transition.NextState)
×
558
        }
×
559

560
        // Make a copy of the input state, then update the peer state of the
561
        // proper party.
UNCOV
562
        newPeerState := *c
×
UNCOV
563
        newPeerState.PeerState.SetForParty(chanPeer, nextPeerState)
×
UNCOV
564

×
UNCOV
565
        return &CloseStateTransition{
×
UNCOV
566
                NextState: &newPeerState,
×
UNCOV
567
                NewEvents: transition.NewEvents,
×
UNCOV
568
        }, nil
×
569
}
570

571
// ProcessEvent drives forward the composite states for the local and remote
// party in response to new events. From this state, we'll continue to drive
// forward the local and remote states until we arrive at the StateFin stage,
// or we loop back up to the ShutdownPending state.
func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	// There're two classes of events that can break us out of this state:
	// we receive a confirmation event, or we receive a signal to restart
	// the co-op close process.
	switch msg := event.(type) {
	// If we get a confirmation, then the spend request we issued when we
	// were leaving the ChannelFlushing state has been confirmed.  We'll
	// now transition to the StateFin state.
	case *SpendEvent:
		return &CloseStateTransition{
			NextState: &CloseFin{
				ConfirmedTx: msg.Tx,
			},
		}, nil

	// Otherwise, if we receive a shutdown, or receive an event to send a
	// shutdown, then we'll go back up to the ChannelActive state, and have
	// it handle this event by emitting an internal event.
	//
	// TODO(roasbeef): both will have fee rate specified, so ok?
	case *ShutdownReceived, *SendShutdown:
		chancloserLog.Infof("ChannelPoint(%v): RBF case triggered, "+
			"restarting negotiation", env.ChanPoint)

		// Wrapping the triggering event as an internal event means
		// the state machine re-delivers it to ChannelActive, which
		// knows how to kick off a fresh shutdown round.
		return &CloseStateTransition{
			NextState: &ChannelActive{},
			NewEvents: fn.Some(RbfEvent{
				InternalEvent: []ProtocolEvent{event},
			}),
		}, nil
	}

	// If we get to this point, then we have an event that'll drive forward
	// the negotiation process.  Based on the event, we'll figure out which
	// state we'll be modifying.
	//
	// NOTE: local is checked before remote; an event is routed to the
	// first party whose sub-state claims it via ShouldRouteTo.
	switch {
	case c.PeerState.GetForParty(lntypes.Local).ShouldRouteTo(event):
		chancloserLog.Infof("ChannelPoint(%v): routing %T to local "+
			"chan state", env.ChanPoint, event)

		// Drive forward the local state based on the next event.
		return processNegotiateEvent(c, event, env, lntypes.Local)

	case c.PeerState.GetForParty(lntypes.Remote).ShouldRouteTo(event):
		chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+
			"chan state", env.ChanPoint, event)

		// Drive forward the remote state based on the next event.
		return processNegotiateEvent(c, event, env, lntypes.Remote)
	}

	// Neither party's sub-state accepted the event: reject it as an
	// invalid transition for this state.
	return nil, fmt.Errorf("%w: received %T while in ClosingNegotiation",
		ErrInvalidStateTransition, event)
}
631

632
// newSigTlv is a helper function that returns a new optional TLV sig field for
633
// the parametrized tlv.TlvType value.
UNCOV
634
func newSigTlv[T tlv.TlvType](s lnwire.Sig) tlv.OptionalRecordT[T, lnwire.Sig] {
×
UNCOV
635
        return tlv.SomeRecordT(tlv.NewRecordT[T](s))
×
UNCOV
636
}
×
637

638
// ProcessEvent implements the event processing to kick off the process of
// obtaining a new (possibly RBF'd) signature for our commitment transaction.
func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) { //nolint:gocritic
	// If we receive a SendOfferEvent, then we'll use the specified fee
	// rate to generate for the closing transaction with our ideal fee
	// rate.
	case *SendOfferEvent:
		// First, we'll figure out the absolute fee rate we should pay
		// given the state of the local/remote outputs.
		localTxOut, remoteTxOut := l.DeriveCloseTxOuts()
		absoluteFee := env.FeeEstimator.EstimateFee(
			env.ChanType, localTxOut, remoteTxOut,
			msg.TargetFeeRate.FeePerKWeight(),
		)

		// Now that we know what fee we want to pay, we'll create a new
		// signature over our co-op close transaction. For our
		// proposals, we'll just always use the known RBF sequence
		// value.
		localScript := l.LocalDeliveryScript
		rawSig, closeTx, closeBalance, err := env.CloseSigner.CreateCloseProposal( //nolint:ll
			absoluteFee, localScript, l.RemoteDeliveryScript,
			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
			lnwallet.WithCustomPayer(lntypes.Local),
		)
		if err != nil {
			return nil, err
		}
		wireSig, err := lnwire.NewSigFromSignature(rawSig)
		if err != nil {
			return nil, err
		}

		chancloserLog.Infof("closing w/ local_addr=%x, "+
			"remote_addr=%x, fee=%v", localScript[:],
			l.RemoteDeliveryScript[:], absoluteFee)

		chancloserLog.Infof("proposing closing_tx=%v",
			spew.Sdump(closeTx))

		// Now that we have our signature, we'll set the proper
		// closingSigs field based on if the remote party's output is
		// dust or not. Exactly one of the three TLV fields is
		// populated, matching which outputs the signed tx contains.
		var closingSigs lnwire.ClosingSigs
		switch {
		// If the remote party's output is dust, then we'll set the
		// CloserNoClosee field.
		case remoteTxOut == nil:
			closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
				wireSig,
			)

		// If after paying for fees, our balance is below dust, then
		// we'll set the NoCloserClosee field.
		case closeBalance < lnwallet.DustLimitForSize(len(localScript)):
			closingSigs.NoCloserClosee = newSigTlv[tlv.TlvType2](
				wireSig,
			)

		// Otherwise, we'll set the CloserAndClosee field.
		//
		// TODO(roasbeef): should actually set both??
		default:
			closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
				wireSig,
			)
		}

		// Now that we have our sig, we'll emit a daemon event to send
		// it to the remote party, then transition to the
		// LocalOfferSent state.
		//
		// TODO(roasbeef): type alias for protocol event
		sendEvent := protofsm.DaemonEventSet{&protofsm.SendMsgEvent[ProtocolEvent]{ //nolint:ll
			TargetPeer: env.ChanPeer,
			// TODO(roasbeef): mew new func
			Msgs: []lnwire.Message{&lnwire.ClosingComplete{
				ChannelID:   env.ChanID,
				FeeSatoshis: absoluteFee,
				LockTime:    env.BlockHeight,
				ClosingSigs: closingSigs,
			}},
		}}

		chancloserLog.Infof("ChannelPoint(%v): sending closing sig "+
			"to remote party, fee_sats=%v", env.ChanPoint,
			absoluteFee)

		// Record the proposed fee and our sig so LocalOfferSent can
		// validate the remote party's eventual response.
		return &CloseStateTransition{
			NextState: &LocalOfferSent{
				ProposedFee:       absoluteFee,
				LocalSig:          wireSig,
				CloseChannelTerms: l.CloseChannelTerms,
			},
			NewEvents: fn.Some(RbfEvent{
				ExternalEvents: sendEvent,
			}),
		}, nil
	}

	return nil, fmt.Errorf("%w: received %T while in LocalCloseStart",
		ErrInvalidStateTransition, event)
}
744

745
// extractSig extracts the expected signature from the closing sig message.
746
// Only one of them should actually be populated as the closing sig message is
747
// sent in response to a ClosingComplete message, it should only sign the same
748
// version of the co-op close tx as the sender did.
UNCOV
749
func extractSig(msg lnwire.ClosingSig) fn.Result[lnwire.Sig] {
×
UNCOV
750
        // First, we'll validate that only one signature is included in their
×
UNCOV
751
        // response to our initial offer. If not, then we'll exit here, and
×
UNCOV
752
        // trigger a recycle of the connection.
×
UNCOV
753
        sigInts := []bool{
×
UNCOV
754
                msg.CloserNoClosee.IsSome(), msg.NoCloserClosee.IsSome(),
×
UNCOV
755
                msg.CloserAndClosee.IsSome(),
×
UNCOV
756
        }
×
UNCOV
757
        numSigs := fn.Foldl(0, sigInts, func(acc int, sigInt bool) int {
×
UNCOV
758
                if sigInt {
×
UNCOV
759
                        return acc + 1
×
UNCOV
760
                }
×
761

UNCOV
762
                return acc
×
763
        })
UNCOV
764
        if numSigs != 1 {
×
UNCOV
765
                return fn.Errf[lnwire.Sig]("%w: only one sig should be set, "+
×
UNCOV
766
                        "got %v", ErrTooManySigs, numSigs)
×
UNCOV
767
        }
×
768

769
        // The final sig is the one that's actually set.
UNCOV
770
        sig := msg.CloserAndClosee.ValOpt().Alt(
×
UNCOV
771
                msg.NoCloserClosee.ValOpt(),
×
UNCOV
772
        ).Alt(
×
UNCOV
773
                msg.CloserNoClosee.ValOpt(),
×
UNCOV
774
        )
×
UNCOV
775

×
UNCOV
776
        return fn.NewResult(sig.UnwrapOrErr(ErrNoSig))
×
777
}
778

779
// ProcessEvent implements the state transition function for the
// LocalOfferSent state. In this state, we'll wait for the remote party to
// send a close_signed message which gives us the ability to broadcast a new
// co-op close transaction.
func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

	switch msg := event.(type) { //nolint:gocritic
	// If we receive a LocalSigReceived event, then we'll attempt to
	// validate the signature from the remote party. If valid, then we can
	// broadcast the transaction, and transition to the ClosePending state.
	case *LocalSigReceived:
		// Extract and validate that only one sig field is set.
		sig, err := extractSig(msg.SigMsg).Unpack()
		if err != nil {
			return nil, err
		}

		// Convert both the remote party's wire sig and the sig we
		// stored when sending our offer into full signatures.
		remoteSig, err := sig.ToSignature()
		if err != nil {
			return nil, err
		}
		localSig, err := l.LocalSig.ToSignature()
		if err != nil {
			return nil, err
		}

		// Now that we have their signature, we'll attempt to validate
		// it, then extract a valid closing signature from it. The
		// same RBF sequence and payer options used when creating our
		// proposal are passed so the tx we finalize matches what both
		// sides signed.
		closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
			localSig, remoteSig, l.LocalDeliveryScript,
			l.RemoteDeliveryScript, l.ProposedFee,
			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
			lnwallet.WithCustomPayer(lntypes.Local),
		)
		if err != nil {
			return nil, err
		}

		// As we're about to broadcast a new version of the co-op close
		// transaction, we'll mark again as broadcast, but with this
		// variant of the co-op close tx.
		err = env.ChanObserver.MarkCoopBroadcasted(closeTx, true)
		if err != nil {
			return nil, err
		}

		broadcastEvent := protofsm.DaemonEventSet{&protofsm.BroadcastTxn{ //nolint:ll
			Tx: closeTx,
			Label: labels.MakeLabel(
				labels.LabelTypeChannelClose, &env.Scid,
			),
		}}

		chancloserLog.Infof("ChannelPoint(%v): received sig from "+
			"remote party, broadcasting: tx=%v", env.ChanPoint,
			lnutils.SpewLogClosure(closeTx),
		)

		return &CloseStateTransition{
			NextState: &ClosePending{
				CloseTx: closeTx,
			},
			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
				ExternalEvents: broadcastEvent,
			}),
		}, nil
	}

	return nil, fmt.Errorf("%w: received %T while in LocalOfferSent",
		ErrInvalidStateTransition, event)
}
851

852
// ProcessEvent implements the state transition function for the
853
// RemoteCloseStart. In this state, we'll wait for the remote party to send a
854
// closing_complete message. Assuming they can pay for the fees, we'll sign it
855
// ourselves, then transition to the next state of ClosePending.
856
func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
UNCOV
857
) (*CloseStateTransition, error) {
×
UNCOV
858

×
UNCOV
859
        switch msg := event.(type) { //nolint:gocritic
×
860
        // If we receive a OfferReceived event, we'll make sure they can
861
        // actually pay for the fee. If so, then we'll counter sign and
862
        // transition to a terminal state.
UNCOV
863
        case *OfferReceivedEvent:
×
UNCOV
864
                // To start, we'll perform some basic validation of the sig
×
UNCOV
865
                // message they've sent. We'll validate that the remote party
×
UNCOV
866
                // actually has enough fees to pay the closing fees.
×
UNCOV
867
                if !l.RemoteCanPayFees(msg.SigMsg.FeeSatoshis) {
×
UNCOV
868
                        return nil, fmt.Errorf("%w: %v vs %v",
×
UNCOV
869
                                ErrRemoteCannotPay,
×
UNCOV
870
                                msg.SigMsg.FeeSatoshis,
×
UNCOV
871
                                l.RemoteBalance.ToSatoshis())
×
UNCOV
872
                }
×
873

874
                // With the basic sanity checks out of the way, we'll now
875
                // figure out which signature that we'll attempt to sign
876
                // against.
UNCOV
877
                var (
×
UNCOV
878
                        remoteSig input.Signature
×
UNCOV
879
                        noClosee  bool
×
UNCOV
880
                )
×
UNCOV
881
                switch {
×
882
                // If our balance is dust, then we expect the CloserNoClosee
883
                // sig to be set.
UNCOV
884
                case l.LocalAmtIsDust():
×
UNCOV
885
                        if msg.SigMsg.CloserNoClosee.IsNone() {
×
UNCOV
886
                                return nil, ErrCloserNoClosee
×
UNCOV
887
                        }
×
888
                        msg.SigMsg.CloserNoClosee.WhenSomeV(func(s lnwire.Sig) {
×
889
                                remoteSig, _ = s.ToSignature()
×
890
                                noClosee = true
×
891
                        })
×
892

893
                // Otherwise, we'll assume that CloseAndClosee is set.
894
                //
895
                // TODO(roasbeef): NoCloserClosee, but makes no sense?
UNCOV
896
                default:
×
UNCOV
897
                        if msg.SigMsg.CloserAndClosee.IsNone() {
×
UNCOV
898
                                return nil, ErrCloserAndClosee
×
UNCOV
899
                        }
×
UNCOV
900
                        msg.SigMsg.CloserAndClosee.WhenSomeV(func(s lnwire.Sig) { //nolint:ll
×
UNCOV
901
                                remoteSig, _ = s.ToSignature()
×
UNCOV
902
                        })
×
903
                }
904

UNCOV
905
                chanOpts := []lnwallet.ChanCloseOpt{
×
UNCOV
906
                        lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
×
UNCOV
907
                        lnwallet.WithCustomLockTime(msg.SigMsg.LockTime),
×
UNCOV
908
                        lnwallet.WithCustomPayer(lntypes.Remote),
×
UNCOV
909
                }
×
UNCOV
910

×
UNCOV
911
                chancloserLog.Infof("responding to close w/ local_addr=%x, "+
×
UNCOV
912
                        "remote_addr=%x, fee=%v",
×
UNCOV
913
                        l.LocalDeliveryScript[:], l.RemoteDeliveryScript[:],
×
UNCOV
914
                        msg.SigMsg.FeeSatoshis)
×
UNCOV
915

×
UNCOV
916
                // Now that we have the remote sig, we'll sign the version they
×
UNCOV
917
                // signed, then attempt to complete the cooperative close
×
UNCOV
918
                // process.
×
UNCOV
919
                //
×
UNCOV
920
                // TODO(roasbeef): need to be able to omit an output when
×
UNCOV
921
                // signing based on the above, as closing opt
×
UNCOV
922
                rawSig, _, _, err := env.CloseSigner.CreateCloseProposal(
×
UNCOV
923
                        msg.SigMsg.FeeSatoshis, l.LocalDeliveryScript,
×
UNCOV
924
                        l.RemoteDeliveryScript, chanOpts...,
×
UNCOV
925
                )
×
UNCOV
926
                if err != nil {
×
927
                        return nil, err
×
928
                }
×
UNCOV
929
                wireSig, err := lnwire.NewSigFromSignature(rawSig)
×
UNCOV
930
                if err != nil {
×
931
                        return nil, err
×
932
                }
×
933

UNCOV
934
                localSig, err := wireSig.ToSignature()
×
UNCOV
935
                if err != nil {
×
936
                        return nil, err
×
937
                }
×
938

939
                // With our signature created, we'll now attempt to finalize the
940
                // close process.
UNCOV
941
                closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
×
UNCOV
942
                        localSig, remoteSig, l.LocalDeliveryScript,
×
UNCOV
943
                        l.RemoteDeliveryScript, msg.SigMsg.FeeSatoshis,
×
UNCOV
944
                        chanOpts...,
×
UNCOV
945
                )
×
UNCOV
946
                if err != nil {
×
947
                        return nil, err
×
948
                }
×
949

UNCOV
950
                chancloserLog.Infof("ChannelPoint(%v): received sig (fee=%v "+
×
UNCOV
951
                        "sats) from remote party, signing new tx=%v",
×
UNCOV
952
                        env.ChanPoint, msg.SigMsg.FeeSatoshis,
×
UNCOV
953
                        lnutils.SpewLogClosure(closeTx),
×
UNCOV
954
                )
×
UNCOV
955

×
UNCOV
956
                var closingSigs lnwire.ClosingSigs
×
UNCOV
957
                if noClosee {
×
958
                        closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
×
959
                                wireSig,
×
960
                        )
×
UNCOV
961
                } else {
×
UNCOV
962
                        closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
×
UNCOV
963
                                wireSig,
×
UNCOV
964
                        )
×
UNCOV
965
                }
×
966

967
                // As we're about to broadcast a new version of the co-op close
968
                // transaction, we'll mark again as broadcast, but with this
969
                // variant of the co-op close tx.
970
                //
971
                // TODO(roasbeef): db will only store one instance, store both?
UNCOV
972
                err = env.ChanObserver.MarkCoopBroadcasted(closeTx, false)
×
UNCOV
973
                if err != nil {
×
974
                        return nil, err
×
975
                }
×
976

977
                // As we transition, we'll omit two events: one to broadcast
978
                // the transaction, and the other to send our ClosingSig
979
                // message to the remote party.
UNCOV
980
                sendEvent := &protofsm.SendMsgEvent[ProtocolEvent]{
×
UNCOV
981
                        TargetPeer: env.ChanPeer,
×
UNCOV
982
                        Msgs: []lnwire.Message{&lnwire.ClosingSig{
×
UNCOV
983
                                ChannelID:   env.ChanID,
×
UNCOV
984
                                ClosingSigs: closingSigs,
×
UNCOV
985
                        }},
×
UNCOV
986
                }
×
UNCOV
987
                broadcastEvent := &protofsm.BroadcastTxn{
×
UNCOV
988
                        Tx: closeTx,
×
UNCOV
989
                        Label: labels.MakeLabel(
×
UNCOV
990
                                labels.LabelTypeChannelClose, &env.Scid,
×
UNCOV
991
                        ),
×
UNCOV
992
                }
×
UNCOV
993
                daemonEvents := protofsm.DaemonEventSet{
×
UNCOV
994
                        sendEvent, broadcastEvent,
×
UNCOV
995
                }
×
UNCOV
996

×
UNCOV
997
                // Now that we've extracted the signature, we'll transition to
×
UNCOV
998
                // the next state where we'll sign+broadcast the sig.
×
UNCOV
999
                return &CloseStateTransition{
×
UNCOV
1000
                        NextState: &ClosePending{
×
UNCOV
1001
                                CloseTx: closeTx,
×
UNCOV
1002
                        },
×
UNCOV
1003
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
×
UNCOV
1004
                                ExternalEvents: daemonEvents,
×
UNCOV
1005
                        }),
×
UNCOV
1006
                }, nil
×
1007
        }
1008

1009
        return nil, fmt.Errorf("%w: received %T while in RemoteCloseStart",
×
1010
                ErrInvalidStateTransition, event)
×
1011
}
1012

1013
// ProcessEvent is a semi-terminal state in the rbf-coop close state machine.
1014
// In this state, we're waiting for either a confirmation, or for either side
1015
// to attempt to create a new RBF'd co-op close transaction.
1016
func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment,
1017
) (*CloseStateTransition, error) {
×
1018

×
1019
        switch msg := event.(type) {
×
1020
        // If we can a spend while waiting for the close, then we'll go to our
1021
        // terminal state.
1022
        case *SpendEvent:
×
1023
                return &CloseStateTransition{
×
1024
                        NextState: &CloseFin{
×
1025
                                ConfirmedTx: msg.Tx,
×
1026
                        },
×
1027
                }, nil
×
1028

1029
        default:
×
1030

×
1031
                return &CloseStateTransition{
×
1032
                        NextState: c,
×
1033
                }, nil
×
1034
        }
1035
}
1036

1037
// ProcessEvent is the event processing for out terminal state. In this state,
1038
// we just keep looping back on ourselves.
1039
func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment,
1040
) (*CloseStateTransition, error) {
×
1041

×
1042
        return &CloseStateTransition{
×
1043
                NextState: c,
×
1044
        }, nil
×
1045
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc