lightningnetwork / lnd · build 13035292482

29 Jan 2025 03:59PM UTC coverage: 49.3% (-9.5%) from 58.777%

Pull Request #9456 (GitHub): lnrpc+docs: deprecate warning `SendToRoute`, `SendToRouteSync`, `SendPayment`, and `SendPaymentSync` in Release 0.19

Commit by mohamedawnallah: docs: update release-notes-0.19.0.md

        In this commit, we warn users about the removal of the RPCs
        `SendToRoute`, `SendToRouteSync`, `SendPayment`, and
        `SendPaymentSync` in the next release, 0.20.

100634 of 204126 relevant lines covered (49.3%)

1.54 hits per line
Source file: /lnwallet/chancloser/rbf_coop_transitions.go (file coverage: 0.0%)

package chancloser

import (
        "fmt"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/chaincfg"
        "github.com/btcsuite/btcd/mempool"
        "github.com/btcsuite/btcd/wire"
        "github.com/davecgh/go-spew/spew"
        "github.com/lightningnetwork/lnd/fn/v2"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/labels"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/lnutils"
        "github.com/lightningnetwork/lnd/lnwallet"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/protofsm"
        "github.com/lightningnetwork/lnd/tlv"
)

// sendShutdownEvents is a helper function that returns a set of daemon events
// we need to emit when we decide that we should send a shutdown message. We'll
// also mark the channel as borked, as at this point we no longer want to
// continue with normal operation.
func sendShutdownEvents(chanID lnwire.ChannelID, chanPoint wire.OutPoint,
        deliveryAddr lnwire.DeliveryAddress, peerPub btcec.PublicKey,
        postSendEvent fn.Option[ProtocolEvent],
        chanState ChanStateObserver) (protofsm.DaemonEventSet, error) {

        // We'll emit a daemon event that instructs the daemon to send out a
        // new shutdown message to the remote peer.
        msgsToSend := &protofsm.SendMsgEvent[ProtocolEvent]{
                TargetPeer: peerPub,
                Msgs: []lnwire.Message{&lnwire.Shutdown{
                        ChannelID: chanID,
                        Address:   deliveryAddr,
                }},
                SendWhen: fn.Some(func() bool {
                        ok := chanState.NoDanglingUpdates()
                        if ok {
                                chancloserLog.Infof("ChannelPoint(%v): no "+
                                        "dangling updates, sending shutdown "+
                                        "message", chanPoint)
                        }

                        return ok
                }),
                PostSendEvent: postSendEvent,
        }

        // If a close is already in process (we're in the RBF loop), then we
        // can skip everything below, and just send out the shutdown message.
        if chanState.FinalBalances().IsSome() {
                return protofsm.DaemonEventSet{msgsToSend}, nil
        }

        // Before closing, we'll attempt to send a disable update for the
        // channel. We do so before closing the channel as otherwise the
        // current edge policy won't be retrievable from the graph.
        if err := chanState.DisableChannel(); err != nil {
                return nil, fmt.Errorf("unable to disable channel: %w", err)
        }

        // If we have a post-send event, then this means that we're the
        // responder. We'll use this fact below to update state in the DB.
        isInitiator := postSendEvent.IsNone()

        chancloserLog.Infof("ChannelPoint(%v): disabling outgoing adds",
                chanPoint)

        // As we're about to send a shutdown, we'll disable adds in the
        // outgoing direction.
        if err := chanState.DisableOutgoingAdds(); err != nil {
                return nil, fmt.Errorf("unable to disable outgoing "+
                        "adds: %w", err)
        }

        // To be able to survive a restart, we'll also write to disk
        // information about the shutdown we're about to send out.
        err := chanState.MarkShutdownSent(deliveryAddr, isInitiator)
        if err != nil {
                return nil, fmt.Errorf("unable to mark shutdown sent: %w", err)
        }

        chancloserLog.Debugf("ChannelPoint(%v): marking channel as borked",
                chanPoint)

        return protofsm.DaemonEventSet{msgsToSend}, nil
}
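
The postSendEvent argument doubles as the initiator flag: isInitiator above is simply postSendEvent.IsNone(). The two call sites later in this file take exactly these shapes; a condensed sketch, with env and shutdownScript standing in for the caller's values:

        // Initiator: no post-send event, since receiving the remote party's
        // shutdown is what advances us past ShutdownPending.
        initiatorEvents, err := sendShutdownEvents(
                env.ChanID, env.ChanPoint, shutdownScript, env.ChanPeer,
                fn.None[ProtocolEvent](), env.ChanObserver,
        )

        // Responder: a ShutdownComplete event is injected back into the
        // state machine once the shutdown message has actually gone out.
        responderEvents, err := sendShutdownEvents(
                env.ChanID, env.ChanPoint, shutdownScript, env.ChanPeer,
                fn.Some[ProtocolEvent](&ShutdownComplete{}), env.ChanObserver,
        )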

// validateShutdown is a helper function that validates that the shutdown has a
// proper delivery script, and can be sent based on the current thaw height of
// the channel.
func validateShutdown(chanThawHeight fn.Option[uint32],
        upfrontAddr fn.Option[lnwire.DeliveryAddress],
        msg *ShutdownReceived, chanPoint wire.OutPoint,
        chainParams chaincfg.Params) error {

        // If we've received a shutdown message, and we have a thaw height,
        // then we need to make sure that the channel can now be co-op closed.
        err := fn.MapOptionZ(chanThawHeight, func(thawHeight uint32) error {
                // If the current height is below the thaw height, then we'll
                // reject the shutdown message as we can't yet co-op close the
                // channel.
                if msg.BlockHeight < thawHeight {
                        return fmt.Errorf("initiator attempting to "+
                                "co-op close frozen ChannelPoint(%v) "+
                                "(current_height=%v, thaw_height=%v)",
                                chanPoint, msg.BlockHeight,
                                thawHeight)
                }

                return nil
        })
        if err != nil {
                return err
        }

        // Next, we'll verify that the remote party is sending the expected
        // shutdown script.
        return fn.MapOption(func(addr lnwire.DeliveryAddress) error {
                return validateShutdownScript(
                        addr, msg.ShutdownScript, &chainParams,
                )
        })(upfrontAddr).UnwrapOr(nil)
}
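
A worked example of the thaw-height check, with hypothetical numbers (chanPoint is a placeholder wire.OutPoint): a shutdown arriving at block height 799,000 for a channel frozen until height 800,000 is rejected, while the same message at height 800,000 or later passes.

        thawHeight := fn.Some[uint32](800_000)
        msg := &ShutdownReceived{BlockHeight: 799_000}
        err := validateShutdown(
                thawHeight, fn.None[lnwire.DeliveryAddress](), msg,
                chanPoint, chaincfg.MainNetParams,
        )
        // err != nil: "initiator attempting to co-op close frozen
        // ChannelPoint(...) (current_height=799000, thaw_height=800000)"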

// ProcessEvent takes a protocol event, and implements a state transition for
// the state. From this state, we can receive two possible incoming events:
// SendShutdown and ShutdownReceived. Both of these will transition us to the
// ChannelFlushing state.
func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) {
        // If we get a confirmation, then a prior transaction we broadcasted
        // has confirmed, so we can move to our terminal state early.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        // If we receive the SendShutdown event, then we'll send our shutdown
        // with a special SendPredicate, then go to the ShutdownPending state,
        // where we'll wait for the remote to send their shutdown.
        case *SendShutdown:
                // If we have an upfront shutdown addr or a delivery addr,
                // then we'll use that. Otherwise, we'll generate a new
                // delivery addr.
                shutdownScript, err := env.LocalUpfrontShutdown.Alt(
                        msg.DeliveryAddr,
                ).UnwrapOrFuncErr(env.NewDeliveryScript)
                if err != nil {
                        return nil, err
                }

                // We'll emit some daemon events to send the shutdown message
                // and disable the channel on the network level. In this case,
                // we don't need a post-send event, as receiving their
                // shutdown is what'll move us beyond the ShutdownPending
                // state.
                daemonEvents, err := sendShutdownEvents(
                        env.ChanID, env.ChanPoint, shutdownScript,
                        env.ChanPeer, fn.None[ProtocolEvent](),
                        env.ChanObserver,
                )
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg, "+
                        "delivery_script=%v", env.ChanPoint, shutdownScript)

                // From here, we'll transition to the shutdown pending state. In
                // this state we await their shutdown message (self loop), then
                // also the flushing event.
                return &CloseStateTransition{
                        NextState: &ShutdownPending{
                                IdealFeeRate: fn.Some(msg.IdealFeeRate),
                                ShutdownScripts: ShutdownScripts{
                                        LocalDeliveryScript: shutdownScript,
                                },
                        },
                        NewEvents: fn.Some(RbfEvent{
                                ExternalEvents: daemonEvents,
                        }),
                }, nil

        // When we receive a shutdown from the remote party, we'll validate the
        // shutdown message, then transition to the ShutdownPending state. We'll
        // also emit similar events as above to send out our shutdown, and also
        // disable the channel.
        case *ShutdownReceived:
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
                        env.ChanPoint)

                // Validate that they can send the message now, and also that
                // they haven't violated their commitment to a prior upfront
                // shutdown addr.
                err := validateShutdown(
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
                        env.ChanPoint, env.ChainParams,
                )
                if err != nil {
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
                                "shutdown attempt: %v", env.ChanPoint, err)

                        return nil, err
                }

                // If we have an upfront shutdown addr we'll use that,
                // otherwise, we'll generate a new delivery script.
                shutdownAddr, err := env.LocalUpfrontShutdown.UnwrapOrFuncErr(
                        env.NewDeliveryScript,
                )
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg "+
                        "at next clean commit state", env.ChanPoint)

                // Now that we know the shutdown message is valid, we'll obtain
                // the set of daemon events we need to emit. We'll also specify
                // that once the message has actually been sent, we receive an
                // input event of a ShutdownComplete.
                daemonEvents, err := sendShutdownEvents(
                        env.ChanID, env.ChanPoint, shutdownAddr,
                        env.ChanPeer,
                        fn.Some[ProtocolEvent](&ShutdownComplete{}),
                        env.ChanObserver,
                )
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
                        env.ChanPoint)

                // We just received a shutdown, so we'll disable adds in the
                // incoming direction.
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
                        return nil, fmt.Errorf("unable to disable incoming "+
                                "adds: %w", err)
                }

                remoteAddr := msg.ShutdownScript

                return &CloseStateTransition{
                        NextState: &ShutdownPending{
                                ShutdownScripts: ShutdownScripts{
                                        LocalDeliveryScript:  shutdownAddr,
                                        RemoteDeliveryScript: remoteAddr,
                                },
                        },
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
                                ExternalEvents: daemonEvents,
                        }),
                }, nil

        // Any other messages in this state will result in an error, as this is
        // an undefined state transition.
        default:
                return nil, fmt.Errorf("%w: received %T while in ChannelActive",
                        ErrInvalidStateTransition, msg)
        }
}
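
Within lnd the surrounding protofsm state machine performs these calls, but as a sketch of the transition above, feeding a SendShutdown event into ChannelActive by hand would look roughly like this (env and the zero-valued event are placeholders):

        transition, err := (&ChannelActive{}).ProcessEvent(&SendShutdown{}, env)
        if err != nil {
                return err
        }
        // transition.NextState is a *ShutdownPending holding our local
        // delivery script, and transition.NewEvents carries the daemon
        // events that emit the shutdown message once no updates dangle.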

// ProcessEvent takes a protocol event, and implements a state transition for
// the state. Our path to this state will determine the set of valid events. If
// we were the one that sent the shutdown, then we'll just wait on the
// ShutdownReceived event. Otherwise, we received the shutdown, and can move
// forward once we receive the ShutdownComplete event. Receiving
// ShutdownComplete means that we've sent our shutdown, as this was specified
// as a post send event.
func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) {
        // If we get a confirmation, then a prior transaction we broadcasted
        // has confirmed, so we can move to our terminal state early.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        // When we receive a shutdown from the remote party, we'll validate the
        // shutdown message, then transition to the ChannelFlushing state.
        case *ShutdownReceived:
                chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
                        env.ChanPoint)

                // Validate that they can send the message now, and also that
                // they haven't violated their commitment to a prior upfront
                // shutdown addr.
                err := validateShutdown(
                        env.ThawHeight, env.RemoteUpfrontShutdown, msg,
                        env.ChanPoint, env.ChainParams,
                )
                if err != nil {
                        chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
                                "shutdown attempt: %v", env.ChanPoint, err)

                        return nil, err
                }

                // If the channel is *already* flushed, and the close is
                // already in progress, then we can skip the flushing state and
                // go straight into negotiation, as this is the RBF loop.
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
                        unknownBalance,
                )
                if finalBalances != unknownBalance {
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
                                ShutdownBalances: finalBalances,
                        })
                        eventsToEmit = fn.Some(RbfEvent{
                                InternalEvent: []ProtocolEvent{
                                        channelFlushed,
                                },
                        })
                }

                chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
                        env.ChanPoint)

                // We just received a shutdown, so we'll disable adds in the
                // incoming direction.
                if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
                        return nil, fmt.Errorf("unable to disable incoming "+
                                "adds: %w", err)
                }

                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
                        "be flushed...", env.ChanPoint)

                // We transition to the ChannelFlushing state, where we await
                // the ChannelFlushed event.
                return &CloseStateTransition{
                        NextState: &ChannelFlushing{
                                IdealFeeRate: s.IdealFeeRate,
                                ShutdownScripts: ShutdownScripts{
                                        LocalDeliveryScript:  s.LocalDeliveryScript, //nolint:ll
                                        RemoteDeliveryScript: msg.ShutdownScript,    //nolint:ll
                                },
                        },
                        NewEvents: eventsToEmit,
                }, nil

        // If we get this message, then this means that we were finally able to
        // send out our shutdown after receiving it from the remote party. We'll
        // now transition directly to the ChannelFlushing state.
        case *ShutdownComplete:
                chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
                        "be flushed...", env.ChanPoint)

                // If the channel is *already* flushed, and the close is
                // already in progress, then we can skip the flushing state and
                // go straight into negotiation, as this is the RBF loop.
                var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
                finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
                        unknownBalance,
                )
                if finalBalances != unknownBalance {
                        channelFlushed := ProtocolEvent(&ChannelFlushed{
                                ShutdownBalances: finalBalances,
                        })
                        eventsToEmit = fn.Some(RbfEvent{
                                InternalEvent: []ProtocolEvent{
                                        channelFlushed,
                                },
                        })
                }

                // From here, we'll transition to the channel flushing state.
                // We'll stay here until we receive the ChannelFlushed event.
                return &CloseStateTransition{
                        NextState: &ChannelFlushing{
                                IdealFeeRate:    s.IdealFeeRate,
                                ShutdownScripts: s.ShutdownScripts,
                        },
                        NewEvents: eventsToEmit,
                }, nil

        // Any other messages in this state will result in an error, as this is
        // an undefined state transition.
        default:
                return nil, fmt.Errorf("%w: received %T while in "+
                        "ShutdownPending", ErrInvalidStateTransition, msg)
        }
}
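
Both branches above detect the RBF shortcut the same way: FinalBalances() is only Some once a prior close attempt has already flushed the channel, so comparing its unwrapped value against the unknownBalance sentinel separates a fresh close from an RBF iteration. Schematically:

        // None => sentinel substituted => equal => fresh close: wait for the
        // flush. Some(balances) => not equal => RBF loop: emit an internal
        // ChannelFlushed event immediately.
        finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
                unknownBalance,
        )
        alreadyFlushed := finalBalances != unknownBalance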

// ProcessEvent takes a new protocol event, and figures out if we can
// transition to the next state, or just loop back upon ourselves. If we
// receive an OfferReceivedEvent, then we'll stay in the ChannelFlushing
// state, as we haven't yet fully cleared the channel. Otherwise, once we
// receive the ChannelFlushed event, we can move to the ClosingNegotiation
// state, which'll begin the channel closing process.
func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) {
        // If we get a confirmation, then a prior transaction we broadcasted
        // has confirmed, so we can move to our terminal state early.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        // If we get an OfferReceived event, then the channel is flushed from
        // the PoV of the remote party. However, due to propagation delay or
        // concurrency, we may not have received the ChannelFlushed event yet.
        // In this case, we'll stash the event and wait for the ChannelFlushed
        // event.
        case *OfferReceivedEvent:
                chancloserLog.Infof("ChannelPoint(%v): received remote offer "+
                        "early, stashing...", env.ChanPoint)

                c.EarlyRemoteOffer = fn.Some(*msg)

                // TODO(roasbeef): unit test!
                //  * actually do this ^

                // We'll perform a noop update so we can wait for the actual
                // channel flushed event.
                return &CloseStateTransition{
                        NextState: c,
                }, nil

        // If we receive the ChannelFlushed event, then the coast is clear so
        // we'll now morph into the dual peer state so we can handle any
        // messages needed to drive forward the close process.
        case *ChannelFlushed:
                // Both the local and remote closing negotiation states need
                // the terms we'll be using to close the channel, so we'll
                // create them here.
                closeTerms := CloseChannelTerms{
                        ShutdownScripts:  c.ShutdownScripts,
                        ShutdownBalances: msg.ShutdownBalances,
                }

                chancloserLog.Infof("ChannelPoint(%v): channel flushed! "+
                        "proceeding with co-op close", env.ChanPoint)

                // Now that the channel has been flushed, we'll mark on disk
                // that we're approaching the point of no return where we'll
                // send a new signature to the remote party.
                //
                // TODO(roasbeef): doesn't actually matter if initiator here?
                if msg.FreshFlush {
                        err := env.ChanObserver.MarkCoopBroadcasted(nil, true)
                        if err != nil {
                                return nil, err
                        }
                }

                // If an ideal fee rate was specified, then we'll use that,
                // otherwise we'll fall back to the default value given in the
                // env.
                idealFeeRate := c.IdealFeeRate.UnwrapOr(env.DefaultFeeRate)

                // We'll then use that fee rate to determine the absolute fee
                // we'd propose.
                //
                // TODO(roasbeef): need to sign the 3 diff versions of this?
                localTxOut, remoteTxOut := closeTerms.DeriveCloseTxOuts()
                absoluteFee := env.FeeEstimator.EstimateFee(
                        env.ChanType, localTxOut, remoteTxOut,
                        idealFeeRate.FeePerKWeight(),
                )

                chancloserLog.Infof("ChannelPoint(%v): using ideal_fee=%v, "+
                        "absolute_fee=%v", env.ChanPoint, idealFeeRate,
                        absoluteFee)

                var (
                        internalEvents []ProtocolEvent
                        newEvents      fn.Option[RbfEvent]
                )

                // If we received a remote offer early from the remote party,
                // then we'll add that to the set of internal events to emit.
                c.EarlyRemoteOffer.WhenSome(func(offer OfferReceivedEvent) {
                        internalEvents = append(internalEvents, &offer)
                })

                // Only if we have enough funds to pay for the fees do we need
                // to emit a localOfferSign event.
                //
                // TODO(roasbeef): also only proceed if was higher than fee in
                // last round?
                if closeTerms.LocalCanPayFees(absoluteFee) {
                        // Each time we go into this negotiation flow, we'll
                        // kick off our local state with a new close attempt.
                        // So we'll emit an internal event to drive forward
                        // that part of the state.
                        localOfferSign := ProtocolEvent(&SendOfferEvent{
                                TargetFeeRate: idealFeeRate,
                        })
                        internalEvents = append(internalEvents, localOfferSign)
                } else {
                        chancloserLog.Infof("ChannelPoint(%v): unable to pay "+
                                "fees with local balance, skipping "+
                                "closing_complete", env.ChanPoint)
                }

                if len(internalEvents) > 0 {
                        newEvents = fn.Some(RbfEvent{
                                InternalEvent: internalEvents,
                        })
                }

                return &CloseStateTransition{
                        NextState: &ClosingNegotiation{
                                PeerState: lntypes.Dual[AsymmetricPeerState]{
                                        Local: &LocalCloseStart{
                                                CloseChannelTerms: closeTerms,
                                        },
                                        Remote: &RemoteCloseStart{
                                                CloseChannelTerms: closeTerms,
                                        },
                                },
                        },
                        NewEvents: newEvents,
                }, nil

        default:
                return nil, fmt.Errorf("%w: received %T while in "+
                        "ChannelFlushing", ErrInvalidStateTransition, msg)
        }
}

538
// processNegotiateEvent is a helper function that processes a new event to
539
// local channel state once we're in the ClosingNegotiation state.
540
func processNegotiateEvent(c *ClosingNegotiation, event ProtocolEvent,
541
        env *Environment, chanPeer lntypes.ChannelParty,
542
) (*CloseStateTransition, error) {
×
543

×
544
        targetPeerState := c.PeerState.GetForParty(chanPeer)
×
545

×
546
        // Drive forward the remote state based on the next event.
×
547
        transition, err := targetPeerState.ProcessEvent(
×
548
                event, env,
×
549
        )
×
550
        if err != nil {
×
551
                return nil, err
×
552
        }
×
553

554
        nextPeerState, ok := transition.NextState.(AsymmetricPeerState) //nolint:ll
×
555
        if !ok {
×
556
                return nil, fmt.Errorf("expected %T to be "+
×
557
                        "AsymmetricPeerState", transition.NextState)
×
558
        }
×
559

560
        // Make a copy of the input state, then update the peer state of the
561
        // proper party.
562
        newPeerState := *c
×
563
        newPeerState.PeerState.SetForParty(chanPeer, nextPeerState)
×
564

×
565
        return &CloseStateTransition{
×
566
                NextState: &newPeerState,
×
567
                NewEvents: transition.NewEvents,
×
568
        }, nil
×
569
}

// ProcessEvent drives forward the composite states for the local and remote
// party in response to new events. From this state, we'll continue to drive
// forward the local and remote states until we arrive at the StateFin stage,
// or we loop back up to the ShutdownPending state.
func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        // There're two classes of events that can break us out of this state:
        // we receive a confirmation event, or we receive a signal to restart
        // the co-op close process.
        switch msg := event.(type) {
        // If we get a confirmation, then the spend request we issued when we
        // were leaving the ChannelFlushing state has been confirmed. We'll
        // now transition to the StateFin state.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        // Otherwise, if we receive a shutdown, or receive an event to send a
        // shutdown, then we'll go back up to the ChannelActive state, and have
        // it handle this event by emitting an internal event.
        //
        // TODO(roasbeef): both will have fee rate specified, so ok?
        case *ShutdownReceived, *SendShutdown:
                chancloserLog.Infof("ChannelPoint(%v): RBF case triggered, "+
                        "restarting negotiation", env.ChanPoint)

                return &CloseStateTransition{
                        NextState: &ChannelActive{},
                        NewEvents: fn.Some(RbfEvent{
                                InternalEvent: []ProtocolEvent{event},
                        }),
                }, nil
        }

        // If we get to this point, then we have an event that'll drive forward
        // the negotiation process. Based on the event, we'll figure out which
        // state we'll be modifying.
        switch {
        case c.PeerState.GetForParty(lntypes.Local).ShouldRouteTo(event):
                chancloserLog.Infof("ChannelPoint(%v): routing %T to local "+
                        "chan state", env.ChanPoint, event)

                // Drive forward the local state based on the next event.
                return processNegotiateEvent(c, event, env, lntypes.Local)

        case c.PeerState.GetForParty(lntypes.Remote).ShouldRouteTo(event):
                chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+
                        "chan state", env.ChanPoint, event)

                // Drive forward the remote state based on the next event.
                return processNegotiateEvent(c, event, env, lntypes.Remote)
        }

        return nil, fmt.Errorf("%w: received %T while in ClosingNegotiation",
                ErrInvalidStateTransition, event)
}

// newSigTlv is a helper function that returns a new optional TLV sig field for
// the parametrized tlv.TlvType value.
func newSigTlv[T tlv.TlvType](s lnwire.Sig) tlv.OptionalRecordT[T, lnwire.Sig] {
        return tlv.SomeRecordT(tlv.NewRecordT[T](s))
}
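
In the negotiation code below, each of the three ClosingSigs slots is filled through this helper with a fixed TLV type: TlvType1 for CloserNoClosee, TlvType2 for NoCloserClosee, and TlvType3 for CloserAndClosee. A sketch, with wireSig as a placeholder lnwire.Sig:

        var sigs lnwire.ClosingSigs
        sigs.CloserNoClosee = newSigTlv[tlv.TlvType1](wireSig)  // remote output is dust
        sigs.NoCloserClosee = newSigTlv[tlv.TlvType2](wireSig)  // our output is dust
        sigs.CloserAndClosee = newSigTlv[tlv.TlvType3](wireSig) // both outputs present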

// ProcessEvent implements the event processing to kick off the process of
// obtaining a new (possibly RBF'd) signature for our co-op close transaction.
func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) { //nolint:gocritic
        // If we receive a SendOfferEvent, then we'll use the specified ideal
        // fee rate to generate a new signature for the closing transaction.
        case *SendOfferEvent:
                // First, we'll figure out the absolute fee we should pay
                // given the state of the local/remote outputs.
                localTxOut, remoteTxOut := l.DeriveCloseTxOuts()
                absoluteFee := env.FeeEstimator.EstimateFee(
                        env.ChanType, localTxOut, remoteTxOut,
                        msg.TargetFeeRate.FeePerKWeight(),
                )

                // Now that we know what fee we want to pay, we'll create a new
                // signature over our co-op close transaction. For our
                // proposals, we'll just always use the known RBF sequence
                // value.
                localScript := l.LocalDeliveryScript
                rawSig, closeTx, closeBalance, err := env.CloseSigner.CreateCloseProposal( //nolint:ll
                        absoluteFee, localScript, l.RemoteDeliveryScript,
                        lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
                        lnwallet.WithCustomPayer(lntypes.Local),
                )
                if err != nil {
                        return nil, err
                }
                wireSig, err := lnwire.NewSigFromSignature(rawSig)
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("closing w/ local_addr=%x, "+
                        "remote_addr=%x, fee=%v", localScript[:],
                        l.RemoteDeliveryScript[:], absoluteFee)

                chancloserLog.Infof("proposing closing_tx=%v",
                        spew.Sdump(closeTx))

                // Now that we have our signature, we'll set the proper
                // closingSigs field based on if the remote party's output is
                // dust or not.
                var closingSigs lnwire.ClosingSigs
                switch {
                // If the remote party's output is dust, then we'll set the
                // CloserNoClosee field.
                case remoteTxOut == nil:
                        closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
                                wireSig,
                        )

                // If after paying for fees, our balance is below dust, then
                // we'll set the NoCloserClosee field.
                case closeBalance < lnwallet.DustLimitForSize(len(localScript)):
                        closingSigs.NoCloserClosee = newSigTlv[tlv.TlvType2](
                                wireSig,
                        )

                // Otherwise, we'll set the CloserAndClosee field.
                //
                // TODO(roasbeef): should actually set both??
                default:
                        closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
                                wireSig,
                        )
                }

                // Now that we have our sig, we'll emit a daemon event to send
                // it to the remote party, then transition to the
                // LocalOfferSent state.
                //
                // TODO(roasbeef): type alias for protocol event
                sendEvent := protofsm.DaemonEventSet{&protofsm.SendMsgEvent[ProtocolEvent]{ //nolint:ll
                        TargetPeer: env.ChanPeer,
                        // TODO(roasbeef): new func
                        Msgs: []lnwire.Message{&lnwire.ClosingComplete{
                                ChannelID:   env.ChanID,
                                FeeSatoshis: absoluteFee,
                                LockTime:    env.BlockHeight,
                                ClosingSigs: closingSigs,
                        }},
                }}

                chancloserLog.Infof("ChannelPoint(%v): sending closing sig "+
                        "to remote party, fee_sats=%v", env.ChanPoint,
                        absoluteFee)

                return &CloseStateTransition{
                        NextState: &LocalOfferSent{
                                ProposedFee:       absoluteFee,
                                LocalSig:          wireSig,
                                CloseChannelTerms: l.CloseChannelTerms,
                        },
                        NewEvents: fn.Some(RbfEvent{
                                ExternalEvents: sendEvent,
                        }),
                }, nil
        }

        return nil, fmt.Errorf("%w: received %T while in LocalCloseStart",
                ErrInvalidStateTransition, event)
}

// extractSig extracts the expected signature from the closing sig message.
// Only one of them should actually be populated: as the closing sig message
// is sent in response to a ClosingComplete message, it should only sign the
// same version of the co-op close tx as the sender did.
func extractSig(msg lnwire.ClosingSig) fn.Result[lnwire.Sig] {
        // First, we'll validate that only one signature is included in their
        // response to our initial offer. If not, then we'll exit here, and
        // trigger a recycle of the connection.
        sigInts := []bool{
                msg.CloserNoClosee.IsSome(), msg.NoCloserClosee.IsSome(),
                msg.CloserAndClosee.IsSome(),
        }
        numSigs := fn.Foldl(0, sigInts, func(acc int, sigInt bool) int {
                if sigInt {
                        return acc + 1
                }

                return acc
        })
        if numSigs != 1 {
                return fn.Errf[lnwire.Sig]("%w: only one sig should be set, "+
                        "got %v", ErrTooManySigs, numSigs)
        }

        // The final sig is the one that's actually set.
        sig := msg.CloserAndClosee.ValOpt().Alt(
                msg.NoCloserClosee.ValOpt(),
        ).Alt(
                msg.CloserNoClosee.ValOpt(),
        )

        return fn.NewResult(sig.UnwrapOrErr(ErrNoSig))
}
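
For instance, a well-formed ClosingSig response carrying exactly one signature unwraps cleanly, while an empty message fails the count check (wireSig is again a placeholder lnwire.Sig):

        var sigs lnwire.ClosingSigs
        sigs.CloserAndClosee = newSigTlv[tlv.TlvType3](wireSig)

        sig, err := extractSig(lnwire.ClosingSig{ClosingSigs: sigs}).Unpack()
        // err == nil; sig is the CloserAndClosee signature.

        _, err = extractSig(lnwire.ClosingSig{}).Unpack()
        // err wraps ErrTooManySigs: "only one sig should be set, got 0".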

// ProcessEvent implements the state transition function for the
// LocalOfferSent state. In this state, we'll wait for the remote party to
// send a closing_sig message which gives us the ability to broadcast a new
// co-op close transaction.
func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) { //nolint:gocritic
        // If we receive a LocalSigReceived event, then we'll attempt to
        // validate the signature from the remote party. If valid, then we can
        // broadcast the transaction, and transition to the ClosePending state.
        case *LocalSigReceived:
                // Extract and validate that only one sig field is set.
                sig, err := extractSig(msg.SigMsg).Unpack()
                if err != nil {
                        return nil, err
                }

                remoteSig, err := sig.ToSignature()
                if err != nil {
                        return nil, err
                }
                localSig, err := l.LocalSig.ToSignature()
                if err != nil {
                        return nil, err
                }

                // Now that we have their signature, we'll attempt to validate
                // it, then extract a valid closing signature from it.
                closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
                        localSig, remoteSig, l.LocalDeliveryScript,
                        l.RemoteDeliveryScript, l.ProposedFee,
                        lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
                        lnwallet.WithCustomPayer(lntypes.Local),
                )
                if err != nil {
                        return nil, err
                }

                // As we're about to broadcast a new version of the co-op close
                // transaction, we'll mark again as broadcast, but with this
                // variant of the co-op close tx.
                err = env.ChanObserver.MarkCoopBroadcasted(closeTx, true)
                if err != nil {
                        return nil, err
                }

                broadcastEvent := protofsm.DaemonEventSet{&protofsm.BroadcastTxn{ //nolint:ll
                        Tx: closeTx,
                        Label: labels.MakeLabel(
                                labels.LabelTypeChannelClose, &env.Scid,
                        ),
                }}

                chancloserLog.Infof("ChannelPoint(%v): received sig from "+
                        "remote party, broadcasting: tx=%v", env.ChanPoint,
                        lnutils.SpewLogClosure(closeTx),
                )

                return &CloseStateTransition{
                        NextState: &ClosePending{
                                CloseTx: closeTx,
                        },
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
                                ExternalEvents: broadcastEvent,
                        }),
                }, nil
        }

        return nil, fmt.Errorf("%w: received %T while in LocalOfferSent",
                ErrInvalidStateTransition, event)
}

// ProcessEvent implements the state transition function for the
// RemoteCloseStart. In this state, we'll wait for the remote party to send a
// closing_complete message. Assuming they can pay for the fees, we'll sign it
// ourselves, then transition to the next state of ClosePending.
func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) { //nolint:gocritic
        // If we receive an OfferReceived event, we'll make sure they can
        // actually pay for the fee. If so, then we'll counter-sign and
        // transition to a terminal state.
        case *OfferReceivedEvent:
                // To start, we'll perform some basic validation of the sig
                // message they've sent. We'll validate that the remote party
                // actually has enough funds to pay the closing fees.
                if !l.RemoteCanPayFees(msg.SigMsg.FeeSatoshis) {
                        return nil, fmt.Errorf("%w: %v vs %v",
                                ErrRemoteCannotPay,
                                msg.SigMsg.FeeSatoshis,
                                l.RemoteBalance.ToSatoshis())
                }

                // With the basic sanity checks out of the way, we'll now
                // figure out which signature we'll attempt to sign against.
                var (
                        remoteSig input.Signature
                        noClosee  bool
                )
                switch {
                // If our balance is dust, then we expect the CloserNoClosee
                // sig to be set.
                case l.LocalAmtIsDust():
                        if msg.SigMsg.CloserNoClosee.IsNone() {
                                return nil, ErrCloserNoClosee
                        }
                        msg.SigMsg.CloserNoClosee.WhenSomeV(func(s lnwire.Sig) {
                                remoteSig, _ = s.ToSignature()
                                noClosee = true
                        })

                // Otherwise, we'll assume that CloserAndClosee is set.
                //
                // TODO(roasbeef): NoCloserClosee, but makes no sense?
                default:
                        if msg.SigMsg.CloserAndClosee.IsNone() {
                                return nil, ErrCloserAndClosee
                        }
                        msg.SigMsg.CloserAndClosee.WhenSomeV(func(s lnwire.Sig) { //nolint:ll
                                remoteSig, _ = s.ToSignature()
                        })
                }

                chanOpts := []lnwallet.ChanCloseOpt{
                        lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
                        lnwallet.WithCustomLockTime(msg.SigMsg.LockTime),
                        lnwallet.WithCustomPayer(lntypes.Remote),
                }

                chancloserLog.Infof("responding to close w/ local_addr=%x, "+
                        "remote_addr=%x, fee=%v",
                        l.LocalDeliveryScript[:], l.RemoteDeliveryScript[:],
                        msg.SigMsg.FeeSatoshis)

                // Now that we have the remote sig, we'll sign the version they
                // signed, then attempt to complete the cooperative close
                // process.
                //
                // TODO(roasbeef): need to be able to omit an output when
                // signing based on the above, as closing opt
                rawSig, _, _, err := env.CloseSigner.CreateCloseProposal(
                        msg.SigMsg.FeeSatoshis, l.LocalDeliveryScript,
                        l.RemoteDeliveryScript, chanOpts...,
                )
                if err != nil {
                        return nil, err
                }
                wireSig, err := lnwire.NewSigFromSignature(rawSig)
                if err != nil {
                        return nil, err
                }

                localSig, err := wireSig.ToSignature()
                if err != nil {
                        return nil, err
                }

                // With our signature created, we'll now attempt to finalize the
                // close process.
                closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
                        localSig, remoteSig, l.LocalDeliveryScript,
                        l.RemoteDeliveryScript, msg.SigMsg.FeeSatoshis,
                        chanOpts...,
                )
                if err != nil {
                        return nil, err
                }

                chancloserLog.Infof("ChannelPoint(%v): received sig (fee=%v "+
                        "sats) from remote party, signing new tx=%v",
                        env.ChanPoint, msg.SigMsg.FeeSatoshis,
                        lnutils.SpewLogClosure(closeTx),
                )

                var closingSigs lnwire.ClosingSigs
                if noClosee {
                        closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
                                wireSig,
                        )
                } else {
                        closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
                                wireSig,
                        )
                }

                // As we're about to broadcast a new version of the co-op close
                // transaction, we'll mark again as broadcast, but with this
                // variant of the co-op close tx.
                //
                // TODO(roasbeef): db will only store one instance, store both?
                err = env.ChanObserver.MarkCoopBroadcasted(closeTx, false)
                if err != nil {
                        return nil, err
                }

                // As we transition, we'll emit two events: one to broadcast
                // the transaction, and the other to send our ClosingSig
                // message to the remote party.
                sendEvent := &protofsm.SendMsgEvent[ProtocolEvent]{
                        TargetPeer: env.ChanPeer,
                        Msgs: []lnwire.Message{&lnwire.ClosingSig{
                                ChannelID:   env.ChanID,
                                ClosingSigs: closingSigs,
                        }},
                }
                broadcastEvent := &protofsm.BroadcastTxn{
                        Tx: closeTx,
                        Label: labels.MakeLabel(
                                labels.LabelTypeChannelClose, &env.Scid,
                        ),
                }
                daemonEvents := protofsm.DaemonEventSet{
                        sendEvent, broadcastEvent,
                }

                // Now that we've extracted the signature, we'll transition to
                // the next state where we'll sign+broadcast the sig.
                return &CloseStateTransition{
                        NextState: &ClosePending{
                                CloseTx: closeTx,
                        },
                        NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
                                ExternalEvents: daemonEvents,
                        }),
                }, nil
        }

        return nil, fmt.Errorf("%w: received %T while in RemoteCloseStart",
                ErrInvalidStateTransition, event)
}

// ProcessEvent is a semi-terminal state in the rbf-coop close state machine.
// In this state, we're waiting for either a confirmation, or for either side
// to attempt to create a new RBF'd co-op close transaction.
func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        switch msg := event.(type) {
        // If we see a spend while waiting for the close, then we'll go to our
        // terminal state.
        case *SpendEvent:
                return &CloseStateTransition{
                        NextState: &CloseFin{
                                ConfirmedTx: msg.Tx,
                        },
                }, nil

        default:
                return &CloseStateTransition{
                        NextState: c,
                }, nil
        }
}

// ProcessEvent is the event processing for our terminal state. In this state,
// we just keep looping back on ourselves.
func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {

        return &CloseStateTransition{
                NextState: c,
        }, nil
}