
lightningnetwork / lnd / build 16569502135

28 Jul 2025 12:50PM UTC coverage: 67.251% (+0.02%) from 67.227%

Pull Request #9455: discovery+lnwire: add support for DNS host name in NodeAnnouncement msg
Merge b3899c4fd into 2e36f9b8b (committed via GitHub web-flow)

179 of 208 new or added lines in 6 files covered. (86.06%)
105 existing lines in 23 files now uncovered.
135676 of 201746 relevant lines covered (67.25%)
21711.59 hits per line

Source file: /peer/brontide.go (78.63% covered)

package peer

import (
        "bytes"
        "container/list"
        "context"
        "errors"
        "fmt"
        "math/rand"
        "net"
        "strings"
        "sync"
        "sync/atomic"
        "time"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/connmgr"
        "github.com/btcsuite/btcd/txscript"
        "github.com/btcsuite/btcd/wire"
        "github.com/btcsuite/btclog/v2"
        "github.com/davecgh/go-spew/spew"
        "github.com/lightningnetwork/lnd/buffer"
        "github.com/lightningnetwork/lnd/chainntnfs"
        "github.com/lightningnetwork/lnd/channeldb"
        "github.com/lightningnetwork/lnd/channelnotifier"
        "github.com/lightningnetwork/lnd/contractcourt"
        "github.com/lightningnetwork/lnd/discovery"
        "github.com/lightningnetwork/lnd/feature"
        "github.com/lightningnetwork/lnd/fn/v2"
        "github.com/lightningnetwork/lnd/funding"
        graphdb "github.com/lightningnetwork/lnd/graph/db"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/htlcswitch"
        "github.com/lightningnetwork/lnd/htlcswitch/hodl"
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/invoices"
        "github.com/lightningnetwork/lnd/keychain"
        "github.com/lightningnetwork/lnd/lnpeer"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/lnutils"
        "github.com/lightningnetwork/lnd/lnwallet"
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
        "github.com/lightningnetwork/lnd/lnwallet/chancloser"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/msgmux"
        "github.com/lightningnetwork/lnd/netann"
        "github.com/lightningnetwork/lnd/pool"
        "github.com/lightningnetwork/lnd/protofsm"
        "github.com/lightningnetwork/lnd/queue"
        "github.com/lightningnetwork/lnd/subscribe"
        "github.com/lightningnetwork/lnd/ticker"
        "github.com/lightningnetwork/lnd/tlv"
        "github.com/lightningnetwork/lnd/watchtower/wtclient"
)

const (
        // pingInterval is the interval at which ping messages are sent.
        pingInterval = 1 * time.Minute

        // pingTimeout is the amount of time we will wait for a pong response
        // before considering the peer to be unresponsive.
        //
        // This MUST be a smaller value than the pingInterval.
        pingTimeout = 30 * time.Second

        // idleTimeout is the duration of inactivity before we time out a peer.
        idleTimeout = 5 * time.Minute

        // writeMessageTimeout is the timeout used when writing a message to the
        // peer.
        writeMessageTimeout = 5 * time.Second

        // readMessageTimeout is the timeout used when reading a message from a
        // peer.
        readMessageTimeout = 5 * time.Second

        // handshakeTimeout is the timeout used when waiting for the peer's init
        // message.
        handshakeTimeout = 15 * time.Second

        // ErrorBufferSize is the number of historic peer errors that we store.
        ErrorBufferSize = 10

        // pongSizeCeiling is the upper bound on a uniformly distributed random
        // variable that we use for requesting pong responses. We don't use the
        // MaxPongBytes (upper bound accepted by the protocol) because it is
        // needlessly wasteful of precious Tor bandwidth for little to no gain.
        pongSizeCeiling = 4096

        // torTimeoutMultiplier is the scaling factor we use on network timeouts
        // for Tor peers.
        torTimeoutMultiplier = 3

        // msgStreamSize is the size of the message streams.
        msgStreamSize = 50
)
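
// Editor's sketch (not part of the original file): the scaleTimeout helper
// referenced later in this file is not shown in this excerpt. A minimal
// sketch of the Tor scaling described by torTimeoutMultiplier above could
// look like the following; the real method hangs off *Brontide and may
// differ in detail.
func exampleScaleTimeout(isTorConnection bool, timeout time.Duration) time.Duration {
        if isTorConnection {
                // Tor circuits add several hops of latency, so network
                // timeouts are stretched to avoid spurious disconnects.
                return timeout * torTimeoutMultiplier
        }

        return timeout
}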

var (
        // ErrChannelNotFound is an error returned when a channel is queried and
        // either the Brontide doesn't know of it, or the channel in question
        // is pending.
        ErrChannelNotFound = fmt.Errorf("channel not found")
)

// outgoingMsg packages an lnwire.Message to be sent out on the wire, along with
// a buffered channel which will be sent upon once the write is complete. This
// buffered channel acts as a semaphore to be used for synchronization purposes.
type outgoingMsg struct {
        priority bool
        msg      lnwire.Message
        errChan  chan error // MUST be buffered.
}
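
// Editor's sketch (assumption, not part of the original file): how the
// buffered errChan above acts as a write-completion semaphore. The queueing
// channel mirrors the sendQueue field defined below; the helper name is
// illustrative only.
func exampleQueueAndWait(sendQueue chan<- outgoingMsg, msg lnwire.Message) error {
        // The channel MUST be buffered so the writer can report the result
        // without blocking, even if the caller has gone away.
        errChan := make(chan error, 1)

        sendQueue <- outgoingMsg{msg: msg, errChan: errChan}

        // Block until the write handler signals that the write completed
        // (or failed).
        return <-errChan
}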

// newChannelMsg packages a channeldb.OpenChannel with a channel that allows
// the receiver of the request to report when the channel creation process has
// completed.
type newChannelMsg struct {
        // channel is used when the pending channel becomes active.
        channel *lnpeer.NewChannel

        // channelID is used when there's a new pending channel.
        channelID lnwire.ChannelID

        err chan error
}

type customMsg struct {
        peer [33]byte
        msg  lnwire.Custom
}

// closeMsg is a wrapper struct around any wire messages that deal with the
// cooperative channel closure negotiation process. This struct includes the
// raw channel ID targeted along with the original message.
type closeMsg struct {
        cid lnwire.ChannelID
        msg lnwire.Message
}

// PendingUpdate describes the pending state of a closing channel.
type PendingUpdate struct {
        // Txid is the txid of the closing transaction.
        Txid []byte

        // OutputIndex is the output index of our output in the closing
        // transaction.
        OutputIndex uint32

        // FeePerVbyte is an optional field that is set only when the new RBF
        // coop close flow is used. This indicates the new closing fee rate on
        // the closing transaction.
        FeePerVbyte fn.Option[chainfee.SatPerVByte]

        // IsLocalCloseTx is an optional field that indicates if this update is
        // sent for our local close txn, or the close txn of the remote party.
        // This is only set if the new RBF coop close flow is used.
        IsLocalCloseTx fn.Option[bool]
}

// ChannelCloseUpdate contains the outcome of the close channel operation.
type ChannelCloseUpdate struct {
        ClosingTxid []byte
        Success     bool

        // LocalCloseOutput is an optional, additional output on the closing
        // transaction that the local party should be paid to. This will only be
        // populated if the local balance isn't dust.
        LocalCloseOutput fn.Option[chancloser.CloseOutput]

        // RemoteCloseOutput is an optional, additional output on the closing
        // transaction that the remote party should be paid to. This will only
        // be populated if the remote balance isn't dust.
        RemoteCloseOutput fn.Option[chancloser.CloseOutput]

        // AuxOutputs is an optional set of additional outputs that might be
        // included in the closing transaction. These are used for custom
        // channel types.
        AuxOutputs fn.Option[chancloser.AuxCloseOutputs]
}

// TimestampedError is a timestamped error that is used to store the most recent
// errors we have experienced with our peers.
type TimestampedError struct {
        Error     error
        Timestamp time.Time
}

// Config defines configuration fields that are necessary for a peer object
// to function.
type Config struct {
        // Conn is the underlying network connection for this peer.
        Conn MessageConn

        // ConnReq stores information related to the persistent connection request
        // for this peer.
        ConnReq *connmgr.ConnReq

        // PubKeyBytes is the serialized, compressed public key of this peer.
        PubKeyBytes [33]byte

        // Addr is the network address of the peer.
        Addr *lnwire.NetAddress

        // Inbound indicates whether or not the peer is an inbound peer.
        Inbound bool

        // Features is the set of features that we advertise to the remote party.
        Features *lnwire.FeatureVector

        // LegacyFeatures is the set of features that we advertise to the remote
        // peer for backwards compatibility. Nodes that have not implemented
        // flat features will still be able to read our feature bits from the
        // legacy global field, but we will also advertise everything in the
        // default features field.
        LegacyFeatures *lnwire.FeatureVector

        // OutgoingCltvRejectDelta defines the number of blocks before expiry of
        // an htlc where we don't offer it anymore.
        OutgoingCltvRejectDelta uint32

        // ChanActiveTimeout specifies the duration the peer will wait to request
        // a channel reenable, beginning from the time the peer was started.
        ChanActiveTimeout time.Duration

        // ErrorBuffer stores a set of errors related to a peer. It contains error
        // messages that our peer has recently sent us over the wire and records of
        // unknown messages that were sent to us so that we can have a full track
        // record of the communication errors we have had with our peer. If we
        // choose to disconnect from a peer, it also stores the reason we had for
        // disconnecting.
        ErrorBuffer *queue.CircularBuffer

        // WritePool is the task pool that manages reuse of write buffers. Write
        // tasks are submitted to the pool in order to conserve the total number of
        // write buffers allocated at any one time, and decouple write buffer
        // allocation from the peer life cycle.
        WritePool *pool.Write

        // ReadPool is the task pool that manages reuse of read buffers.
        ReadPool *pool.Read

        // Switch is a pointer to the htlcswitch. It is used to setup, get, and
        // tear-down ChannelLinks.
        Switch messageSwitch

        // InterceptSwitch is a pointer to the InterceptableSwitch, a wrapper around
        // the regular Switch. We only export it here to pass ForwardPackets to the
        // ChannelLinkConfig.
        InterceptSwitch *htlcswitch.InterceptableSwitch

        // ChannelDB is used to fetch opened channels, and closed channels.
        ChannelDB *channeldb.ChannelStateDB

        // ChannelGraph is a pointer to the channel graph which is used to
        // query information about the set of known active channels.
        ChannelGraph *graphdb.ChannelGraph

        // ChainArb is used to subscribe to channel events, update contract signals,
        // and force close channels.
        ChainArb *contractcourt.ChainArbitrator

        // AuthGossiper is needed so that the Brontide impl can register with the
        // gossiper and process remote channel announcements.
        AuthGossiper *discovery.AuthenticatedGossiper

        // ChanStatusMgr is used to set or un-set the disabled bit in channel
        // updates.
        ChanStatusMgr *netann.ChanStatusManager

        // ChainIO is used to retrieve the best block.
        ChainIO lnwallet.BlockChainIO

        // FeeEstimator is used to compute our target ideal fee-per-kw when
        // initializing the coop close process.
        FeeEstimator chainfee.Estimator

        // Signer is used when creating *lnwallet.LightningChannel instances.
        Signer input.Signer

        // SigPool is used when creating *lnwallet.LightningChannel instances.
        SigPool *lnwallet.SigPool

        // Wallet is used to publish transactions and generate delivery
        // scripts during the coop close process.
        Wallet *lnwallet.LightningWallet

        // ChainNotifier is used to receive confirmations of a coop close
        // transaction.
        ChainNotifier chainntnfs.ChainNotifier

        // BestBlockView is used to efficiently query for up-to-date
        // blockchain state information.
        BestBlockView chainntnfs.BestBlockView

        // RoutingPolicy is used to set the forwarding policy for links created by
        // the Brontide.
        RoutingPolicy models.ForwardingPolicy

        // Sphinx is used when setting up ChannelLinks so they can decode sphinx
        // onion blobs.
        Sphinx *hop.OnionProcessor

        // WitnessBeacon is used when setting up ChannelLinks so they can add any
        // preimages that they learn.
        WitnessBeacon contractcourt.WitnessBeacon

        // Invoices is passed to the ChannelLink on creation and handles all
        // invoice-related logic.
        Invoices *invoices.InvoiceRegistry

        // ChannelNotifier is used by the link to notify other sub-systems about
        // channel-related events and by the Brontide to subscribe to
        // ActiveLinkEvents.
        ChannelNotifier *channelnotifier.ChannelNotifier

        // HtlcNotifier is used when creating a ChannelLink.
        HtlcNotifier *htlcswitch.HtlcNotifier

        // TowerClient is used to backup revoked states.
        TowerClient wtclient.ClientManager

        // DisconnectPeer is used to disconnect this peer if the cooperative close
        // process fails.
        DisconnectPeer func(*btcec.PublicKey) error

        // GenNodeAnnouncement is used to send our node announcement to the remote
        // on startup.
        GenNodeAnnouncement func(...netann.NodeAnnModifier) (
                lnwire.NodeAnnouncement, error)

        // PrunePersistentPeerConnection is used to remove all internal state
        // related to this peer in the server.
        PrunePersistentPeerConnection func([33]byte)

        // FetchLastChanUpdate fetches our latest channel update for a target
        // channel.
        FetchLastChanUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate1,
                error)

        // FundingManager is an implementation of the funding.Controller interface.
        FundingManager funding.Controller

        // Hodl is used when creating ChannelLinks to specify HodlFlags as
        // breakpoints in dev builds.
        Hodl *hodl.Config

        // UnsafeReplay is used when creating ChannelLinks to specify whether or
        // not to replay adds on its commitment tx.
        UnsafeReplay bool

        // MaxOutgoingCltvExpiry is used when creating ChannelLinks and is the max
        // number of blocks that funds could be locked up for when forwarding
        // payments.
        MaxOutgoingCltvExpiry uint32

        // MaxChannelFeeAllocation is used when creating ChannelLinks and is the
        // maximum percentage of total funds that can be allocated to a channel's
        // commitment fee. This only applies for the initiator of the channel.
        MaxChannelFeeAllocation float64

        // MaxAnchorsCommitFeeRate is the maximum fee rate we'll use as an
        // initiator for anchor channel commitments.
        MaxAnchorsCommitFeeRate chainfee.SatPerKWeight

        // CoopCloseTargetConfs is the confirmation target that will be used
        // to estimate the fee rate to use during a cooperative channel
        // closure initiated by the remote peer.
        CoopCloseTargetConfs uint32

        // ServerPubKey is the serialized, compressed public key of our lnd node.
        // It is used to determine which policy (channel edge) to pass to the
        // ChannelLink.
        ServerPubKey [33]byte

        // ChannelCommitInterval is the maximum time that is allowed to pass between
        // receiving a channel state update and signing the next commitment.
        // Setting this to a longer duration allows for more efficient channel
        // operations at the cost of latency.
        ChannelCommitInterval time.Duration

        // PendingCommitInterval is the maximum time that is allowed to pass
        // while waiting for the remote party to revoke a locally initiated
        // commitment state. Set this to a longer duration if a slow response
        // is expected from the remote party or a large number of payments are
        // attempted at the same time.
        PendingCommitInterval time.Duration

        // ChannelCommitBatchSize is the maximum number of channel state updates
        // that are accumulated before signing a new commitment.
        ChannelCommitBatchSize uint32

        // HandleCustomMessage is called whenever a custom message is received
        // from the peer.
        HandleCustomMessage func(peer [33]byte, msg *lnwire.Custom) error

        // GetAliases is passed to created links so the Switch and link can be
        // aware of the channel's aliases.
        GetAliases func(base lnwire.ShortChannelID) []lnwire.ShortChannelID

        // RequestAlias allows the Brontide struct to request an alias to send
        // to the peer.
        RequestAlias func() (lnwire.ShortChannelID, error)

        // AddLocalAlias persists an alias to an underlying alias store.
        AddLocalAlias func(alias, base lnwire.ShortChannelID,
                gossip, liveUpdate bool) error

        // AuxLeafStore is an optional store that can be used to store auxiliary
        // leaves for certain custom channel types.
        AuxLeafStore fn.Option[lnwallet.AuxLeafStore]

        // AuxSigner is an optional signer that can be used to sign auxiliary
        // leaves for certain custom channel types.
        AuxSigner fn.Option[lnwallet.AuxSigner]

        // AuxResolver is an optional interface that can be used to modify the
        // way contracts are resolved.
        AuxResolver fn.Option[lnwallet.AuxContractResolver]

        // AuxTrafficShaper is an optional auxiliary traffic shaper that can be
        // used to manage the bandwidth of peer links.
        AuxTrafficShaper fn.Option[htlcswitch.AuxTrafficShaper]

        // PongBuf is a slice we'll reuse instead of allocating memory on the
        // heap. Since only reads will occur and no writes, there is no need
        // for any synchronization primitives. As a result, it's safe to share
        // this across multiple Peer struct instances.
        PongBuf []byte

        // DisallowRouteBlinding disables forwarding payments in blinded routes
        // by failing back any blinding-related payloads as if they were
        // invalid.
        DisallowRouteBlinding bool

        // DisallowQuiescence is a flag that indicates whether the Brontide
        // should have the quiescence feature disabled.
        DisallowQuiescence bool

        // QuiescenceTimeout is the max duration that the channel can be
        // quiesced. Any dependent protocols (dynamic commitments, splicing,
        // etc.) must finish their operations under this timeout value,
        // otherwise the node will disconnect.
        QuiescenceTimeout time.Duration

        // MaxFeeExposure limits the number of outstanding fees in a channel.
        // This value will be passed to created links.
        MaxFeeExposure lnwire.MilliSatoshi

        // MsgRouter is an optional instance of the main message router that
        // the peer will use. If None, then a new default version will be used
        // in place.
        MsgRouter fn.Option[msgmux.Router]

        // AuxChanCloser is an optional instance of an abstraction that can be
        // used to modify the way the co-op close transaction is constructed.
        AuxChanCloser fn.Option[chancloser.AuxChanCloser]

        // ShouldFwdExpEndorsement is a closure that indicates whether
        // experimental endorsement signals should be set.
        ShouldFwdExpEndorsement func() bool

        // NoDisconnectOnPongFailure indicates whether the peer should *not* be
        // disconnected if a pong is not received in time or is mismatched.
        NoDisconnectOnPongFailure bool

        // Quit is the server's quit channel. If this is closed, we halt operation.
        Quit chan struct{}
}
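
// Editor's sketch (assumption, not part of the original file): the fn.Option
// fields above default to fn.None, so a zero-value Config opts out of all of
// them. Opting in wraps a concrete value in fn.Some, mirroring what
// NewBrontide does below when it falls back to a fresh router.
func exampleOptionalConfig(cfg *Config) {
        cfg.MsgRouter = fn.Some[msgmux.Router](msgmux.NewMultiMsgRouter())
}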

// chanCloserFsm is a union-like type that can hold the two versions of co-op
// close we support: negotiation, and RBF based.
//
// TODO(roasbeef): rename to chancloser.Negotiator and chancloser.RBF?
type chanCloserFsm = fn.Either[*chancloser.ChanCloser, *chancloser.RbfChanCloser] //nolint:ll

// makeNegotiateCloser creates a new negotiate closer from a
// chancloser.ChanCloser.
func makeNegotiateCloser(chanCloser *chancloser.ChanCloser) chanCloserFsm {
        return fn.NewLeft[*chancloser.ChanCloser, *chancloser.RbfChanCloser](
                chanCloser,
        )
}

// makeRbfCloser creates a new RBF closer from a chancloser.RbfChanCloser.
func makeRbfCloser(rbfCloser *chancloser.RbfChanCloser) chanCloserFsm {
        return fn.NewRight[*chancloser.ChanCloser](
                rbfCloser,
        )
}
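
// Editor's sketch (assumption, not part of the original file): consumers of
// the chanCloserFsm union branch on it via fn.Either's matching helpers. The
// WhenLeft/WhenRight names are assumed from lnd's fn/v2 package; the handler
// bodies are placeholders.
func exampleMatchCloser(closer chanCloserFsm) {
        closer.WhenLeft(func(c *chancloser.ChanCloser) {
                // Legacy negotiation-based co-op close.
        })
        closer.WhenRight(func(c *chancloser.RbfChanCloser) {
                // New RBF-based co-op close.
        })
}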

// Brontide is an active peer on the Lightning Network. This struct is responsible
// for managing any channel state related to this peer. To do so, it has
// several helper goroutines to handle events such as HTLC timeouts, new
// funding workflow, and detecting an uncooperative closure of any active
// channels.
type Brontide struct {
        // MUST be used atomically.
        started    int32
        disconnect int32

        // MUST be used atomically.
        bytesReceived uint64
        bytesSent     uint64

        // isTorConnection is a flag that indicates whether or not we believe
        // the remote peer is a tor connection. It is not always possible to
        // know this with certainty but we have heuristics we use that should
        // catch most cases.
        //
        // NOTE: We judge the tor-ness of a connection by if the remote peer has
        // ".onion" in the address OR if it's connected over localhost.
        // This will miss cases where our peer is connected to our clearnet
        // address over the tor network (via exit nodes). It will also misjudge
        // actual localhost connections as tor. We need to include this because
        // inbound connections to our tor address will appear to come from the
        // local socks5 proxy. This heuristic is only used to expand the timeout
        // window for peers so it is OK to misjudge this. If you use this field
        // for any other purpose you should seriously consider whether or not
        // this heuristic is good enough for your use case.
        isTorConnection bool

        pingManager *PingManager

        // lastPingPayload stores an unsafe pointer wrapped as an atomic
        // variable which points to the last payload the remote party sent us
        // as their ping.
        //
        // MUST be used atomically.
        lastPingPayload atomic.Value

        cfg Config

        // activeSignal when closed signals that the peer is now active and
        // ready to process messages.
        activeSignal chan struct{}

        // startTime is the time this peer connection was successfully established.
        // It will be zero for peers that did not successfully call Start().
        startTime time.Time

        // sendQueue is the channel which is used to queue outgoing messages to be
        // written onto the wire. Note that this channel is unbuffered.
        sendQueue chan outgoingMsg

        // outgoingQueue is a buffered channel which allows second/third party
        // objects to queue messages to be sent out on the wire.
        outgoingQueue chan outgoingMsg

        // activeChannels is a map which stores the state machines of all
        // active channels. Channels are indexed into the map by the txid of
        // the funding transaction which opened the channel.
        //
        // NOTE: On startup, pending channels are stored as nil in this map.
        // Confirmed channels have channel data populated in the map. This means
        // that accesses to this map should nil-check the LightningChannel to
        // see if this is a pending channel or not. The tradeoff here is either
        // having two maps everywhere (one for pending, one for confirmed chans)
        // or having an extra nil-check per access.
        activeChannels *lnutils.SyncMap[
                lnwire.ChannelID, *lnwallet.LightningChannel]

        // addedChannels tracks any new channels opened during this peer's
        // lifecycle. We use this to filter out these new channels when the time
        // comes to request a reenable for active channels, since they will have
        // waited a shorter duration.
        addedChannels *lnutils.SyncMap[lnwire.ChannelID, struct{}]

        // newActiveChannel is used by the fundingManager to send fully opened
        // channels to the source peer which handled the funding workflow.
        newActiveChannel chan *newChannelMsg

        // newPendingChannel is used by the fundingManager to send pending open
        // channels to the source peer which handled the funding workflow.
        newPendingChannel chan *newChannelMsg

        // removePendingChannel is used by the fundingManager to cancel pending
        // open channels to the source peer when the funding flow is failed.
        removePendingChannel chan *newChannelMsg

        // activeMsgStreams is a map from channel id to the channel streams that
        // proxy messages to individual, active links.
        activeMsgStreams map[lnwire.ChannelID]*msgStream

        // activeChanCloses is a map that keeps track of all the active
        // cooperative channel closures. Any channel closing messages are directed
        // to one of these active state machines. Once the channel has been closed,
        // the state machine will be deleted from the map.
        activeChanCloses *lnutils.SyncMap[lnwire.ChannelID, chanCloserFsm]

        // localCloseChanReqs is a channel in which any local requests to close
        // a particular channel are sent over.
        localCloseChanReqs chan *htlcswitch.ChanClose

        // linkFailures receives all reported channel failures from the switch,
        // and instructs the channelManager to clean remaining channel state.
        linkFailures chan linkFailureReport

        // chanCloseMsgs is a channel that any message related to channel
        // closures are sent over. This includes lnwire.Shutdown message as
        // well as lnwire.ClosingSigned messages.
        chanCloseMsgs chan *closeMsg

        // remoteFeatures is the feature vector received from the peer during
        // the connection handshake.
        remoteFeatures *lnwire.FeatureVector

        // resentChanSyncMsg is a set that keeps track of which channels we
        // have re-sent channel reestablishment messages for. This is done to
        // avoid getting into a loop where both peers will respond to the other
        // peer's chansync message with its own over and over again.
        resentChanSyncMsg map[lnwire.ChannelID]struct{}

        // channelEventClient is the channel event subscription client that's
        // used to assist retry enabling the channels. This client is only
        // created when the reenableTimeout is no greater than 1 minute. Once
        // created, it is canceled once the reenabling has been finished.
        //
        // NOTE: we choose to create the client conditionally to avoid
        // potentially holding lots of un-consumed events.
        channelEventClient *subscribe.Client

        // msgRouter is an instance of the msgmux.Router which is used to send
        // off new wire messages for handling.
        msgRouter fn.Option[msgmux.Router]

        // globalMsgRouter is a flag that indicates whether we have a global
        // msg router. If so, then we don't worry about stopping the msg router
        // when a peer disconnects.
        globalMsgRouter bool

        startReady chan struct{}

        // cg is a helper that encapsulates a wait group and quit channel and
        // allows contexts that either block or cancel on those depending on
        // the use case.
        cg *fn.ContextGuard

        // log is a peer-specific logging instance.
        log btclog.Logger
}
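
// Editor's sketch (assumption, not part of the original file): the nil-check
// described in the NOTE on activeChannels above, written out as an in-package
// helper. A nil entry means the channel is known but still pending
// confirmation.
func exampleIsPendingChan(p *Brontide, cid lnwire.ChannelID) (pending, known bool) {
        lnChan, ok := p.activeChannels.Load(cid)
        if !ok {
                return false, false
        }

        return lnChan == nil, true
}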

// A compile-time check to ensure that Brontide satisfies the lnpeer.Peer
// interface.
var _ lnpeer.Peer = (*Brontide)(nil)

// NewBrontide creates a new Brontide from a peer.Config struct.
func NewBrontide(cfg Config) *Brontide {
        logPrefix := fmt.Sprintf("Peer(%x):", cfg.PubKeyBytes)

        // We have a global message router if one was passed in via the config.
        // In this case, we don't need to attempt to tear it down when the peer
        // is stopped.
        globalMsgRouter := cfg.MsgRouter.IsSome()

        // We'll either use the msg router instance passed in, or create a new
        // blank instance.
        msgRouter := cfg.MsgRouter.Alt(fn.Some[msgmux.Router](
                msgmux.NewMultiMsgRouter(),
        ))

        p := &Brontide{
                cfg:           cfg,
                activeSignal:  make(chan struct{}),
                sendQueue:     make(chan outgoingMsg),
                outgoingQueue: make(chan outgoingMsg),
                addedChannels: &lnutils.SyncMap[lnwire.ChannelID, struct{}]{},
                activeChannels: &lnutils.SyncMap[
                        lnwire.ChannelID, *lnwallet.LightningChannel,
                ]{},
                newActiveChannel:     make(chan *newChannelMsg, 1),
                newPendingChannel:    make(chan *newChannelMsg, 1),
                removePendingChannel: make(chan *newChannelMsg),

                activeMsgStreams: make(map[lnwire.ChannelID]*msgStream),
                activeChanCloses: &lnutils.SyncMap[
                        lnwire.ChannelID, chanCloserFsm,
                ]{},
                localCloseChanReqs: make(chan *htlcswitch.ChanClose),
                linkFailures:       make(chan linkFailureReport),
                chanCloseMsgs:      make(chan *closeMsg),
                resentChanSyncMsg:  make(map[lnwire.ChannelID]struct{}),
                startReady:         make(chan struct{}),
                log:                peerLog.WithPrefix(logPrefix),
                msgRouter:          msgRouter,
                globalMsgRouter:    globalMsgRouter,
                cg:                 fn.NewContextGuard(),
        }

        if cfg.Conn != nil && cfg.Conn.RemoteAddr() != nil {
                remoteAddr := cfg.Conn.RemoteAddr().String()
                p.isTorConnection = strings.Contains(remoteAddr, ".onion") ||
                        strings.Contains(remoteAddr, "127.0.0.1")
        }

        var (
                lastBlockHeader           *wire.BlockHeader
                lastSerializedBlockHeader [wire.MaxBlockHeaderPayload]byte
        )
        newPingPayload := func() []byte {
                // We query the BestBlockHeader from our BestBlockView each time
                // this is called, and update our serialized block header if
                // they differ. Over time, we'll use this to disseminate the
                // latest block header between all our peers, which can later be
                // used to cross-check our own view of the network to mitigate
                // various types of eclipse attacks.
                header, err := p.cfg.BestBlockView.BestBlockHeader()
                if err != nil && header == lastBlockHeader {
                        return lastSerializedBlockHeader[:]
                }

                buf := bytes.NewBuffer(lastSerializedBlockHeader[0:0])
                err = header.Serialize(buf)
                if err == nil {
                        lastBlockHeader = header
                } else {
                        p.log.Warn("unable to serialize current block " +
                                "header for ping payload generation. " +
                                "This should be impossible and means " +
                                "there is an implementation bug.")
                }

                return lastSerializedBlockHeader[:]
        }

        // TODO(roasbeef): make dynamic in order to create fake cover traffic.
        //
        // NOTE(proofofkeags): this was changed to be dynamic to allow better
        // pong identification, however, more thought is needed to make this
        // actually usable as a traffic decoy.
        randPongSize := func() uint16 {
                return uint16(
                        // We don't need cryptographic randomness here.
                        /* #nosec */
                        rand.Intn(pongSizeCeiling) + 1,
                )
        }

        p.pingManager = NewPingManager(&PingManagerConfig{
                NewPingPayload:   newPingPayload,
                NewPongSize:      randPongSize,
                IntervalDuration: p.scaleTimeout(pingInterval),
                TimeoutDuration:  p.scaleTimeout(pingTimeout),
                SendPing: func(ping *lnwire.Ping) {
                        p.queueMsg(ping, nil)
                },
                OnPongFailure: func(reason error,
                        timeWaitedForPong time.Duration,
                        lastKnownRTT time.Duration) {

                        logMsg := fmt.Sprintf("pong response "+
                                "failure for %s: %v. Time waited for this "+
                                "pong: %v. Last successful RTT: %v.",
                                p, reason, timeWaitedForPong, lastKnownRTT)

                        // If NoDisconnectOnPongFailure is true, we don't
                        // disconnect. Otherwise (if it's false, the default),
                        // we disconnect.
                        if p.cfg.NoDisconnectOnPongFailure {
                                p.log.Warnf("%s -- not disconnecting "+
                                        "due to config", logMsg)
                                return
                        }

                        p.log.Warnf("%s -- disconnecting", logMsg)

                        go p.Disconnect(fmt.Errorf("pong failure: %w", reason))
                },
        })

        return p
}

// Start starts all helper goroutines the peer needs for normal operations. In
// the case this peer has already been started, then this function is a noop.
func (p *Brontide) Start() error {
        if atomic.AddInt32(&p.started, 1) != 1 {
                return nil
        }

        // Once we've finished starting up the peer, we'll signal to other
        // goroutines that they can move forward to tear down the peer, or
        // carry out other relevant changes.
        defer close(p.startReady)

        p.log.Tracef("starting with conn[%v->%v]",
                p.cfg.Conn.LocalAddr(), p.cfg.Conn.RemoteAddr())

        // Fetch and then load all the active channels we have with this remote
        // peer from the database.
        activeChans, err := p.cfg.ChannelDB.FetchOpenChannels(
                p.cfg.Addr.IdentityKey,
        )
        if err != nil {
                p.log.Errorf("Unable to fetch active chans "+
                        "for peer: %v", err)
                return err
        }

        if len(activeChans) == 0 {
                go p.cfg.PrunePersistentPeerConnection(p.cfg.PubKeyBytes)
        }

        // Quickly check if we have any existing legacy channels with this
        // peer.
        haveLegacyChan := false
        for _, c := range activeChans {
                if c.ChanType.IsTweakless() {
                        continue
                }

                haveLegacyChan = true
                break
        }

        // Exchange local and global features; the init message must be the
        // very first message exchanged between the two nodes.
        if err := p.sendInitMsg(haveLegacyChan); err != nil {
                return fmt.Errorf("unable to send init msg: %w", err)
        }

        // Before we launch any of the helper goroutines off the peer struct,
        // we'll first ensure proper adherence to the p2p protocol. The init
        // message MUST be sent before any other message.
        readErr := make(chan error, 1)
        msgChan := make(chan lnwire.Message, 1)
        p.cg.WgAdd(1)
        go func() {
                defer p.cg.WgDone()

                msg, err := p.readNextMessage()
                if err != nil {
                        readErr <- err
                        msgChan <- nil
                        return
                }
                readErr <- nil
                msgChan <- msg
        }()

        select {
        // In order to avoid blocking indefinitely, we'll give the other peer
        // an upper timeout to respond before we bail out early.
        case <-time.After(handshakeTimeout):
                return fmt.Errorf("peer did not complete handshake within %v",
                        handshakeTimeout)
        case err := <-readErr:
                if err != nil {
                        return fmt.Errorf("unable to read init msg: %w", err)
                }
        }

        // Once the init message arrives, we can parse it so we can figure out
        // the negotiation of features for this session.
        msg := <-msgChan
        if msg, ok := msg.(*lnwire.Init); ok {
                if err := p.handleInitMsg(msg); err != nil {
                        p.storeError(err)
                        return err
                }
        } else {
                return errors.New("very first message between nodes " +
                        "must be init message")
        }

        // Next, load all the active channels we have with this peer,
        // registering them with the switch and launching the necessary
        // goroutines required to operate them.
        p.log.Debugf("Loaded %v active channels from database",
                len(activeChans))

        // Conditionally subscribe to channel events before loading channels so
        // we won't miss events. This subscription is used to listen to active
        // channel events when reenabling channels. Once the reenabling process
        // is finished, this subscription will be canceled.
        //
        // NOTE: ChannelNotifier must be started before subscribing events
        // otherwise we'd panic here.
        if err := p.attachChannelEventSubscription(); err != nil {
                return err
        }

        // Register the message router now as we may need to register some
        // endpoints while loading the channels below.
        p.msgRouter.WhenSome(func(router msgmux.Router) {
                router.Start(context.Background())
        })

        msgs, err := p.loadActiveChannels(activeChans)
        if err != nil {
                return fmt.Errorf("unable to load channels: %w", err)
        }

        p.startTime = time.Now()

        // Before launching the writeHandler goroutine, we send any channel
        // sync messages that must be resent for borked channels. We do this to
        // avoid data races with WriteMessage & Flush calls.
        if len(msgs) > 0 {
                p.log.Infof("Sending %d channel sync messages to peer after "+
                        "loading active channels", len(msgs))

                // Send the messages directly via writeMessage and bypass the
                // writeHandler goroutine.
                for _, msg := range msgs {
                        if err := p.writeMessage(msg); err != nil {
                                return fmt.Errorf("unable to send "+
                                        "reestablish msg: %v", err)
                        }
                }
        }

        err = p.pingManager.Start()
        if err != nil {
                return fmt.Errorf("could not start ping manager: %w", err)
        }

        p.cg.WgAdd(4)
        go p.queueHandler()
        go p.writeHandler()
        go p.channelManager()
        go p.readHandler()

        // Signal to any external processes that the peer is now active.
        close(p.activeSignal)

        // Node announcements don't propagate very well throughout the network
        // as there isn't a way to efficiently query for them through their
        // timestamp, mostly affecting nodes that were offline during the time
        // of broadcast. We'll resend our node announcement to the remote peer
        // as a best-effort delivery such that it can also propagate to their
        // peers. To ensure they can successfully process it in most cases,
        // we'll only resend it as long as we have at least one confirmed
        // advertised channel with the remote peer.
        //
        // TODO(wilmer): Remove this once we're able to query for node
        // announcements through their timestamps.
        p.cg.WgAdd(2)
        go p.maybeSendNodeAnn(activeChans)
        go p.maybeSendChannelUpdates()

        return nil
}
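
// Editor's sketch (assumption, not part of the original file): an in-package
// caller (e.g. a test) can gate on the peer becoming active after Start by
// selecting on activeSignal, which Start closes once all handlers are
// running.
func exampleWaitUntilActive(p *Brontide) error {
        if err := p.Start(); err != nil {
                return err
        }

        select {
        case <-p.activeSignal:
                return nil
        case <-p.cg.Done():
                return errors.New("peer quit before becoming active")
        }
}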

// initGossipSync initializes either a gossip syncer or an initial routing
// dump, depending on the negotiated synchronization method.
func (p *Brontide) initGossipSync() {
        // If the remote peer knows of the new gossip queries feature, then
        // we'll create a new gossipSyncer in the AuthenticatedGossiper for it.
        if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) {
                p.log.Info("Negotiated chan series queries")

                if p.cfg.AuthGossiper == nil {
                        // This should only ever be hit in the unit tests.
                        p.log.Warn("No AuthGossiper configured. Abandoning " +
                                "gossip sync.")
                        return
                }

                // Register the peer's gossip syncer with the gossiper.
                // This blocks synchronously to ensure the gossip syncer is
                // registered with the gossiper before attempting to read
                // messages from the remote peer.
                //
                // TODO(wilmer): Only sync updates from non-channel peers. This
                // requires an improved version of the current network
                // bootstrapper to ensure we can find and connect to non-channel
                // peers.
                p.cfg.AuthGossiper.InitSyncState(p)
        }
}

// taprootShutdownAllowed returns true if both parties have negotiated the
// shutdown-any-segwit feature.
func (p *Brontide) taprootShutdownAllowed() bool {
        return p.RemoteFeatures().HasFeature(lnwire.ShutdownAnySegwitOptional) &&
                p.LocalFeatures().HasFeature(lnwire.ShutdownAnySegwitOptional)
}

// rbfCoopCloseAllowed returns true if both parties have negotiated the new RBF
// coop close feature.
func (p *Brontide) rbfCoopCloseAllowed() bool {
        bothHaveBit := func(bit lnwire.FeatureBit) bool {
                return p.RemoteFeatures().HasFeature(bit) &&
                        p.LocalFeatures().HasFeature(bit)
        }

        return bothHaveBit(lnwire.RbfCoopCloseOptional) ||
                bothHaveBit(lnwire.RbfCoopCloseOptionalStaging)
}

// QuitSignal is a method that should return a channel which will be sent upon
// or closed once the backing peer exits. This allows callers using the
// interface to cancel any processing in the event the backing implementation
// exits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) QuitSignal() <-chan struct{} {
        return p.cg.Done()
}

// addrWithInternalKey takes a delivery script, then attempts to supplement it
// with information related to the internal key for the addr, but only if it's
// a taproot addr.
func (p *Brontide) addrWithInternalKey(
        deliveryScript []byte) (*chancloser.DeliveryAddrWithKey, error) {

        // Currently, custom channels cannot be created with external upfront
        // shutdown addresses, so this shouldn't be an issue. We only require
        // the internal key for taproot addresses to be able to provide a
        // non-inclusion proof of any scripts.
        internalKeyDesc, err := lnwallet.InternalKeyForAddr(
                p.cfg.Wallet, &p.cfg.Wallet.Cfg.NetParams, deliveryScript,
        )
        if err != nil {
                return nil, fmt.Errorf("unable to fetch internal key: %w", err)
        }

        return &chancloser.DeliveryAddrWithKey{
                DeliveryAddress: deliveryScript,
                InternalKey: fn.MapOption(
                        func(desc keychain.KeyDescriptor) btcec.PublicKey {
                                return *desc.PubKey
                        },
                )(internalKeyDesc),
        }, nil
}

// loadActiveChannels creates indexes within the peer for tracking all active
// channels returned by the database. It returns a slice of channel reestablish
// messages that should be sent to the peer immediately, in case we have borked
// channels that haven't been closed yet.
func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
        []lnwire.Message, error) {

        // Return a slice of messages to send to the peer in case the channel
        // cannot be loaded normally.
        var msgs []lnwire.Message

        scidAliasNegotiated := p.hasNegotiatedScidAlias()

        for _, dbChan := range chans {
                hasScidFeature := dbChan.ChanType.HasScidAliasFeature()
                if scidAliasNegotiated && !hasScidFeature {
                        // We'll request and store an alias, making sure that a
                        // gossiper mapping is not created for the alias to the
                        // real SCID. This is done because the peer and funding
                        // manager are not aware of each other's states and if
                        // we did not do this, we would accept alias channel
                        // updates after 6 confirmations, which would be buggy.
                        // We'll queue a channel_ready message with the new
                        // alias. This should technically be done *after* the
                        // reestablish, but this behavior is pre-existing since
                        // the funding manager may already queue a
                        // channel_ready before the channel_reestablish.
                        if !dbChan.IsPending {
                                aliasScid, err := p.cfg.RequestAlias()
                                if err != nil {
                                        return nil, err
                                }

                                err = p.cfg.AddLocalAlias(
                                        aliasScid, dbChan.ShortChanID(), false,
                                        false,
                                )
                                if err != nil {
                                        return nil, err
                                }

                                chanID := lnwire.NewChanIDFromOutPoint(
                                        dbChan.FundingOutpoint,
                                )

                                // Fetch the second commitment point to send in
                                // the channel_ready message.
                                second, err := dbChan.SecondCommitmentPoint()
                                if err != nil {
                                        return nil, err
                                }

                                channelReadyMsg := lnwire.NewChannelReady(
                                        chanID, second,
                                )
                                channelReadyMsg.AliasScid = &aliasScid

                                msgs = append(msgs, channelReadyMsg)
                        }

                        // If we've negotiated the option-scid-alias feature
                        // and this channel does not have ScidAliasFeature set
                        // to true due to an upgrade where the feature bit was
                        // turned on, we'll update the channel's database
                        // state.
                        err := dbChan.MarkScidAliasNegotiated()
                        if err != nil {
                                return nil, err
                        }
                }

                var chanOpts []lnwallet.ChannelOpt
                p.cfg.AuxLeafStore.WhenSome(func(s lnwallet.AuxLeafStore) {
                        chanOpts = append(chanOpts, lnwallet.WithLeafStore(s))
                })
                p.cfg.AuxSigner.WhenSome(func(s lnwallet.AuxSigner) {
                        chanOpts = append(chanOpts, lnwallet.WithAuxSigner(s))
                })
                p.cfg.AuxResolver.WhenSome(
                        func(s lnwallet.AuxContractResolver) {
                                chanOpts = append(
                                        chanOpts, lnwallet.WithAuxResolver(s),
                                )
                        },
                )

                lnChan, err := lnwallet.NewLightningChannel(
                        p.cfg.Signer, dbChan, p.cfg.SigPool, chanOpts...,
                )
                if err != nil {
                        return nil, fmt.Errorf("unable to create channel "+
                                "state machine: %w", err)
                }

                chanPoint := dbChan.FundingOutpoint

                chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

                p.log.Infof("Loading ChannelPoint(%v), isPending=%v",
                        chanPoint, lnChan.IsPending())

                // Skip adding any permanently irreconcilable channels to the
                // htlcswitch.
                if !dbChan.HasChanStatus(channeldb.ChanStatusDefault) &&
                        !dbChan.HasChanStatus(channeldb.ChanStatusRestored) {

                        p.log.Warnf("ChannelPoint(%v) has status %v, won't "+
                                "start.", chanPoint, dbChan.ChanStatus())

                        // To help our peer recover from a potential data loss,
                        // we resend our channel reestablish message if the
                        // channel is in a borked state. We won't process any
                        // channel reestablish message sent from the peer, but
                        // that's okay since the assumption is that we did when
                        // marking the channel borked.
                        chanSync, err := dbChan.ChanSyncMsg()
                        if err != nil {
                                p.log.Errorf("Unable to create channel "+
                                        "reestablish message for channel %v: "+
                                        "%v", chanPoint, err)
                                continue
                        }

                        msgs = append(msgs, chanSync)

                        // Check if this channel needs to have the cooperative
                        // close process restarted. If so, we'll need to send
                        // the Shutdown message that is returned.
                        if dbChan.HasChanStatus(
                                channeldb.ChanStatusCoopBroadcasted,
                        ) {

                                shutdownMsg, err := p.restartCoopClose(lnChan)
                                if err != nil {
                                        p.log.Errorf("Unable to restart "+
                                                "coop close for channel: %v",
                                                err)
                                        continue
                                }

                                if shutdownMsg == nil {
                                        continue
                                }

                                // Append the message to the set of messages to
                                // send.
                                msgs = append(msgs, shutdownMsg)
                        }

                        continue
                }

                // Before we register this new link with the HTLC Switch, we'll
                // need to fetch its current link-layer forwarding policy from
                // the database.
                graph := p.cfg.ChannelGraph
                info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(
                        &chanPoint,
                )
                if err != nil && !errors.Is(err, graphdb.ErrEdgeNotFound) {
                        return nil, err
                }

                // We'll filter out our policy from the directional channel
                // edges based on whom the edge connects to. If it doesn't
                // connect to us, then we know that we were the one that
                // advertised the policy.
                //
                // TODO(roasbeef): can add helper method to get policy for
                // particular channel.
                var selfPolicy *models.ChannelEdgePolicy
                if info != nil && bytes.Equal(info.NodeKey1Bytes[:],
                        p.cfg.ServerPubKey[:]) {

                        selfPolicy = p1
                } else {
                        selfPolicy = p2
                }

                // If we don't yet have an advertised routing policy, then
                // we'll use the current default, otherwise we'll translate the
                // routing policy into a forwarding policy.
                var forwardingPolicy *models.ForwardingPolicy
                if selfPolicy != nil {
                        forwardingPolicy = &models.ForwardingPolicy{
                                MinHTLCOut:    selfPolicy.MinHTLC,
                                MaxHTLC:       selfPolicy.MaxHTLC,
                                BaseFee:       selfPolicy.FeeBaseMSat,
                                FeeRate:       selfPolicy.FeeProportionalMillionths,
                                TimeLockDelta: uint32(selfPolicy.TimeLockDelta),
3✔
1220
                        }
3✔
1221
                        selfPolicy.InboundFee.WhenSome(func(fee lnwire.Fee) {
3✔
1222
                                inboundFee := models.NewInboundFeeFromWire(fee)
×
1223
                                forwardingPolicy.InboundFee = inboundFee
×
1224
                        })
×
1225
                } else {
3✔
1226
                        p.log.Warnf("Unable to find our forwarding policy "+
3✔
1227
                                "for channel %v, using default values",
3✔
1228
                                chanPoint)
3✔
1229
                        forwardingPolicy = &p.cfg.RoutingPolicy
3✔
1230
                }
3✔
1231

1232
                p.log.Tracef("Using link policy of: %v",
3✔
1233
                        spew.Sdump(forwardingPolicy))
3✔
1234

3✔
1235
                // If the channel is pending, set the value to nil in the
3✔
1236
                // activeChannels map. This is done to signify that the channel
3✔
1237
                // is pending. We don't add the link to the switch here - it's
3✔
1238
                // the funding manager's responsibility to spin up pending
3✔
1239
                // channels. Adding them here would just be extra work as we'll
3✔
1240
                // tear them down when creating + adding the final link.
3✔
1241
                if lnChan.IsPending() {
6✔
1242
                        p.activeChannels.Store(chanID, nil)
3✔
1243

3✔
1244
                        continue
3✔
1245
                }
1246

1247
                shutdownInfo, err := lnChan.State().ShutdownInfo()
3✔
1248
                if err != nil && !errors.Is(err, channeldb.ErrNoShutdownInfo) {
3✔
1249
                        return nil, err
×
1250
                }
×
1251

1252
                isTaprootChan := lnChan.ChanType().IsTaproot()
3✔
1253

3✔
1254
                var (
3✔
1255
                        shutdownMsg     fn.Option[lnwire.Shutdown]
3✔
1256
                        shutdownInfoErr error
3✔
1257
                )
3✔
1258
                shutdownInfo.WhenSome(func(info channeldb.ShutdownInfo) {
6✔
1259
                        // If we can use the new RBF close feature, we don't
3✔
1260
                        // need to create the legacy closer. However for taproot
3✔
1261
                        // channels, we'll continue to use the legacy closer.
3✔
1262
                        if p.rbfCoopCloseAllowed() && !isTaprootChan {
6✔
1263
                                return
3✔
1264
                        }
3✔
1265

1266
                        // Compute an ideal fee.
1267
                        feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
3✔
1268
                                p.cfg.CoopCloseTargetConfs,
3✔
1269
                        )
3✔
1270
                        if err != nil {
3✔
1271
                                shutdownInfoErr = fmt.Errorf("unable to "+
×
1272
                                        "estimate fee: %w", err)
×
1273

×
1274
                                return
×
1275
                        }
×
1276

1277
                        addr, err := p.addrWithInternalKey(
3✔
1278
                                info.DeliveryScript.Val,
3✔
1279
                        )
3✔
1280
                        if err != nil {
3✔
1281
                                shutdownInfoErr = fmt.Errorf("unable to make "+
×
1282
                                        "delivery addr: %w", err)
×
1283
                                return
×
1284
                        }
×
1285
                        negotiateChanCloser, err := p.createChanCloser(
3✔
1286
                                lnChan, addr, feePerKw, nil,
3✔
1287
                                info.Closer(),
3✔
1288
                        )
3✔
1289
                        if err != nil {
3✔
1290
                                shutdownInfoErr = fmt.Errorf("unable to "+
×
1291
                                        "create chan closer: %w", err)
×
1292

×
1293
                                return
×
1294
                        }
×
1295

1296
                        chanID := lnwire.NewChanIDFromOutPoint(
3✔
1297
                                lnChan.State().FundingOutpoint,
3✔
1298
                        )
3✔
1299

3✔
1300
                        p.activeChanCloses.Store(chanID, makeNegotiateCloser(
3✔
1301
                                negotiateChanCloser,
3✔
1302
                        ))
3✔
1303

3✔
1304
                        // Create the Shutdown message.
3✔
1305
                        shutdown, err := negotiateChanCloser.ShutdownChan()
3✔
1306
                        if err != nil {
3✔
1307
                                p.activeChanCloses.Delete(chanID)
×
1308
                                shutdownInfoErr = err
×
1309

×
1310
                                return
×
1311
                        }
×
1312

1313
                        shutdownMsg = fn.Some(*shutdown)
3✔
1314
                })
1315
                if shutdownInfoErr != nil {
3✔
1316
                        return nil, shutdownInfoErr
×
1317
                }
×
1318

1319
                // Subscribe to the set of on-chain events for this channel.
1320
                chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(
3✔
1321
                        chanPoint,
3✔
1322
                )
3✔
1323
                if err != nil {
3✔
1324
                        return nil, err
×
1325
                }
×
1326

1327
                err = p.addLink(
3✔
1328
                        &chanPoint, lnChan, forwardingPolicy, chainEvents,
3✔
1329
                        true, shutdownMsg,
3✔
1330
                )
3✔
1331
                if err != nil {
3✔
1332
                        return nil, fmt.Errorf("unable to add link %v to "+
×
1333
                                "switch: %v", chanPoint, err)
×
1334
                }
×
1335

1336
                p.activeChannels.Store(chanID, lnChan)
3✔
1337

3✔
1338
                // We're using the old co-op close, so we don't need to init
3✔
1339
                // the new RBF chan closer. If we have a taproot chan, then
3✔
1340
                // we'll also use the legacy type, so we don't need to make the
3✔
1341
                // new closer.
3✔
1342
                if !p.rbfCoopCloseAllowed() || isTaprootChan {
6✔
1343
                        continue
3✔
1344
                }
1345

1346
                // Now that the link has been added above, we'll also init an
1347
                // RBF chan closer for this channel, but only if the new close
1348
                // feature is negotiated.
1349
                //
1350
                // Creating this here ensures that any shutdown messages sent
1351
                // will be automatically routed by the msg router.
1352
                if _, err := p.initRbfChanCloser(lnChan); err != nil {
3✔
1353
                        p.activeChanCloses.Delete(chanID)
×
1354

×
1355
                        return nil, fmt.Errorf("unable to init RBF chan "+
×
1356
                                "closer during peer connect: %w", err)
×
1357
                }
×
1358

1359
                // If the shutdown info isn't blank, then we should kick things
1360
                // off by sending a shutdown message to the remote party to
1361
                // continue the old shutdown flow.
1362
                restartShutdown := func(s channeldb.ShutdownInfo) error {
6✔
1363
                        return p.startRbfChanCloser(
3✔
1364
                                newRestartShutdownInit(s),
3✔
1365
                                lnChan.ChannelPoint(),
3✔
1366
                        )
3✔
1367
                }
3✔
1368
                err = fn.MapOptionZ(shutdownInfo, restartShutdown)
3✔
1369
                if err != nil {
3✔
1370
                        return nil, fmt.Errorf("unable to start RBF "+
×
1371
                                "chan closer: %w", err)
×
1372
                }
×
1373
        }
1374

1375
        return msgs, nil
6✔
1376
}
1377

1378
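// NOTE: The reestablish and shutdown messages collected above are returned to
// the caller, which writes them out once the connection is fully set up. A
// minimal, illustrative sketch of that pattern (the loop itself lives outside
// this excerpt):
//
//	for _, msg := range msgs {
//		if err := p.SendMessage(true, msg); err != nil {
//			return err
//		}
//	}
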
// addLink creates and adds a new ChannelLink from the specified channel.
func (p *Brontide) addLink(chanPoint *wire.OutPoint,
        lnChan *lnwallet.LightningChannel,
        forwardingPolicy *models.ForwardingPolicy,
        chainEvents *contractcourt.ChainEventSubscription,
        syncStates bool, shutdownMsg fn.Option[lnwire.Shutdown]) error {

        // onChannelFailure will be called by the link in case the channel
        // fails for some reason.
        onChannelFailure := func(chanID lnwire.ChannelID,
                shortChanID lnwire.ShortChannelID,
                linkErr htlcswitch.LinkFailureError) {

                failure := linkFailureReport{
                        chanPoint:   *chanPoint,
                        chanID:      chanID,
                        shortChanID: shortChanID,
                        linkErr:     linkErr,
                }

                select {
                case p.linkFailures <- failure:
                case <-p.cg.Done():
                case <-p.cfg.Quit:
                }
        }

        updateContractSignals := func(signals *contractcourt.ContractSignals) error {
                return p.cfg.ChainArb.UpdateContractSignals(*chanPoint, signals)
        }

        notifyContractUpdate := func(update *contractcourt.ContractUpdate) error {
                return p.cfg.ChainArb.NotifyContractUpdate(*chanPoint, update)
        }

        //nolint:ll
        linkCfg := htlcswitch.ChannelLinkConfig{
                Peer:                   p,
                DecodeHopIterators:     p.cfg.Sphinx.DecodeHopIterators,
                ExtractErrorEncrypter:  p.cfg.Sphinx.ExtractErrorEncrypter,
                FetchLastChannelUpdate: p.cfg.FetchLastChanUpdate,
                HodlMask:               p.cfg.Hodl.Mask(),
                Registry:               p.cfg.Invoices,
                BestHeight:             p.cfg.Switch.BestHeight,
                Circuits:               p.cfg.Switch.CircuitModifier(),
                ForwardPackets:         p.cfg.InterceptSwitch.ForwardPackets,
                FwrdingPolicy:          *forwardingPolicy,
                FeeEstimator:           p.cfg.FeeEstimator,
                PreimageCache:          p.cfg.WitnessBeacon,
                ChainEvents:            chainEvents,
                UpdateContractSignals:  updateContractSignals,
                NotifyContractUpdate:   notifyContractUpdate,
                OnChannelFailure:       onChannelFailure,
                SyncStates:             syncStates,
                BatchTicker:            ticker.New(p.cfg.ChannelCommitInterval),
                FwdPkgGCTicker:         ticker.New(time.Hour),
                PendingCommitTicker: ticker.New(
                        p.cfg.PendingCommitInterval,
                ),
                BatchSize:               p.cfg.ChannelCommitBatchSize,
                UnsafeReplay:            p.cfg.UnsafeReplay,
                MinUpdateTimeout:        htlcswitch.DefaultMinLinkFeeUpdateTimeout,
                MaxUpdateTimeout:        htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
                OutgoingCltvRejectDelta: p.cfg.OutgoingCltvRejectDelta,
                TowerClient:             p.cfg.TowerClient,
                MaxOutgoingCltvExpiry:   p.cfg.MaxOutgoingCltvExpiry,
                MaxFeeAllocation:        p.cfg.MaxChannelFeeAllocation,
                MaxAnchorsCommitFeeRate: p.cfg.MaxAnchorsCommitFeeRate,
                NotifyActiveLink:        p.cfg.ChannelNotifier.NotifyActiveLinkEvent,
                NotifyActiveChannel:     p.cfg.ChannelNotifier.NotifyActiveChannelEvent,
                NotifyInactiveChannel:   p.cfg.ChannelNotifier.NotifyInactiveChannelEvent,
                NotifyInactiveLinkEvent: p.cfg.ChannelNotifier.NotifyInactiveLinkEvent,
                HtlcNotifier:            p.cfg.HtlcNotifier,
                GetAliases:              p.cfg.GetAliases,
                PreviouslySentShutdown:  shutdownMsg,
                DisallowRouteBlinding:   p.cfg.DisallowRouteBlinding,
                MaxFeeExposure:          p.cfg.MaxFeeExposure,
                ShouldFwdExpEndorsement: p.cfg.ShouldFwdExpEndorsement,
                DisallowQuiescence: p.cfg.DisallowQuiescence ||
                        !p.remoteFeatures.HasFeature(lnwire.QuiescenceOptional),
                AuxTrafficShaper:  p.cfg.AuxTrafficShaper,
                QuiescenceTimeout: p.cfg.QuiescenceTimeout,
        }

        // Before adding our new link, purge the switch of any pending or live
        // links going by the same channel id. If one is found, we'll shut it
        // down to ensure that the mailboxes are only ever under the control of
        // one link.
        chanID := lnwire.NewChanIDFromOutPoint(*chanPoint)
        p.cfg.Switch.RemoveLink(chanID)

        // With the channel link created, we'll now notify the htlc switch so
        // this channel can be used to dispatch local payments and also
        // passively forward payments.
        return p.cfg.Switch.CreateAndAddLink(linkCfg, lnChan)
}

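// The linkFailureReport values emitted by onChannelFailure above are drained
// by a dedicated goroutine elsewhere in the peer. A minimal illustrative
// sketch of that consumer, assuming only the channel and quit signal used
// above:
//
//	for {
//		select {
//		case report := <-p.linkFailures:
//			// Tear the link down and record the failure.
//			_ = report
//		case <-p.cg.Done():
//			return
//		}
//	}
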
// maybeSendNodeAnn sends our node announcement to the remote peer if at least
// one confirmed public channel exists with them.
func (p *Brontide) maybeSendNodeAnn(channels []*channeldb.OpenChannel) {
        defer p.cg.WgDone()

        hasConfirmedPublicChan := false
        for _, channel := range channels {
                if channel.IsPending {
                        continue
                }
                if channel.ChannelFlags&lnwire.FFAnnounceChannel == 0 {
                        continue
                }

                hasConfirmedPublicChan = true
                break
        }
        if !hasConfirmedPublicChan {
                return
        }

        ourNodeAnn, err := p.cfg.GenNodeAnnouncement()
        if err != nil {
                p.log.Debugf("Unable to retrieve node announcement: %v", err)
                return
        }

        if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil {
                p.log.Debugf("Unable to resend node announcement: %v", err)
        }
}

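// The public/private check above inspects the announce bit carried in the
// channel_flags field of the original open_channel message; as a one-line
// sketch:
//
//	isPublic := channel.ChannelFlags&lnwire.FFAnnounceChannel != 0
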
// maybeSendChannelUpdates sends our channel updates to the remote peer if we
// have any active channels with them.
func (p *Brontide) maybeSendChannelUpdates() {
        defer p.cg.WgDone()

        // If we don't have any active channels, then we can exit early.
        if p.activeChannels.Len() == 0 {
                return
        }

        maybeSendUpd := func(cid lnwire.ChannelID,
                lnChan *lnwallet.LightningChannel) error {

                // Nil channels are pending, so we'll skip them.
                if lnChan == nil {
                        return nil
                }

                dbChan := lnChan.State()
                scid := func() lnwire.ShortChannelID {
                        switch {
                        // If it's a zero conf channel and confirmed, then we
                        // need to use the "real" scid.
                        case dbChan.IsZeroConf() && dbChan.ZeroConfConfirmed():
                                return dbChan.ZeroConfRealScid()

                        // Otherwise, we can use the normal scid.
                        default:
                                return dbChan.ShortChanID()
                        }
                }()

                // Now that we know the channel is in a good state, we'll try
                // to fetch the update to send to the remote peer. If the
                // channel is pending, and not a zero conf channel, we'll get
                // an error here which we'll ignore.
                chanUpd, err := p.cfg.FetchLastChanUpdate(scid)
                if err != nil {
                        p.log.Debugf("Unable to fetch channel update for "+
                                "ChannelPoint(%v), scid=%v: %v",
                                dbChan.FundingOutpoint, dbChan.ShortChanID(),
                                err)

                        return nil
                }

                p.log.Debugf("Sending channel update for ChannelPoint(%v), "+
                        "scid=%v", dbChan.FundingOutpoint, dbChan.ShortChanID())

                // We'll send it as a normal message instead of using the lazy
                // queue to prioritize transmission of the fresh update.
                if err := p.SendMessage(false, chanUpd); err != nil {
                        err := fmt.Errorf("unable to send channel update for "+
                                "ChannelPoint(%v), scid=%v: %w",
                                dbChan.FundingOutpoint, dbChan.ShortChanID(),
                                err)
                        p.log.Errorf(err.Error())

                        return err
                }

                return nil
        }

        p.activeChannels.ForEach(maybeSendUpd)
}

// WaitForDisconnect waits until the peer has disconnected. A peer may be
// disconnected if the local or remote side terminates the connection, or an
// irrecoverable protocol error has been encountered. This method will only
// begin watching the peer's waitgroup after the ready channel or the peer's
// quit channel is signaled. The ready channel should only be signaled if a
// call to Start returns no error. Otherwise, if the peer fails to start,
// calling Disconnect will signal the quit channel and the method will not
// block, since no goroutines were spawned.
func (p *Brontide) WaitForDisconnect(ready chan struct{}) {
        // Before we try to call the `Wait` goroutine, we'll make sure the main
        // set of goroutines are already active.
        select {
        case <-p.startReady:
        case <-p.cg.Done():
                return
        }

        select {
        case <-ready:
        case <-p.cg.Done():
        }

        p.cg.WgWait()
}

// Disconnect terminates the connection with the remote peer. Additionally, a
// signal is sent to the server and htlcSwitch indicating the resources
// allocated to the peer can now be cleaned up.
//
// NOTE: Be aware that this method will block if the peer is still starting up.
// Therefore consider starting it in a goroutine if you cannot guarantee that
// the peer has finished starting up before calling this method.
func (p *Brontide) Disconnect(reason error) {
        if !atomic.CompareAndSwapInt32(&p.disconnect, 0, 1) {
                return
        }

        // Make sure initialization has completed before we try to tear things
        // down.
        //
        // NOTE: We only read the `startReady` chan if the peer has been
        // started, otherwise we will skip reading it as this chan won't be
        // closed and we would block forever.
        if atomic.LoadInt32(&p.started) == 1 {
                p.log.Debugf("Peer hasn't finished starting up yet, waiting " +
                        "on startReady signal before closing connection")

                select {
                case <-p.startReady:
                case <-p.cg.Done():
                        return
                }
        }

        err := fmt.Errorf("disconnecting %s, reason: %v", p, reason)
        p.storeError(err)

        p.log.Infof(err.Error())

        // Stop PingManager before closing TCP connection.
        p.pingManager.Stop()

        // Ensure that the TCP connection is properly closed before continuing.
        p.cfg.Conn.Close()

        p.cg.Quit()

        // If our msg router isn't global (local to this instance), then we'll
        // stop it. Otherwise, we'll leave it running.
        if !p.globalMsgRouter {
                p.msgRouter.WhenSome(func(router msgmux.Router) {
                        router.Stop()
                })
        }
}

// String returns the string representation of this peer.
func (p *Brontide) String() string {
        return fmt.Sprintf("%x@%s", p.cfg.PubKeyBytes, p.cfg.Conn.RemoteAddr())
}

// readNextMessage reads and returns the next message on the wire, along with
// any additional raw payload.
func (p *Brontide) readNextMessage() (lnwire.Message, error) {
        noiseConn := p.cfg.Conn
        err := noiseConn.SetReadDeadline(time.Time{})
        if err != nil {
                return nil, err
        }

        pktLen, err := noiseConn.ReadNextHeader()
        if err != nil {
                return nil, fmt.Errorf("read next header: %w", err)
        }

        // First we'll read the next _full_ message. We do this rather than
        // reading incrementally from the stream as the Lightning wire protocol
        // is message oriented and allows nodes to pad on additional data to
        // the message stream.
        var (
                nextMsg lnwire.Message
                msgLen  uint64
        )
        err = p.cfg.ReadPool.Submit(func(buf *buffer.Read) error {
                // Before reading the body of the message, set the read timeout
                // accordingly to ensure we don't block other readers using the
                // pool. We do so only after the task has been scheduled to
                // ensure the deadline doesn't expire while the message is in
                // the process of being scheduled.
                readDeadline := time.Now().Add(
                        p.scaleTimeout(readMessageTimeout),
                )
                readErr := noiseConn.SetReadDeadline(readDeadline)
                if readErr != nil {
                        return readErr
                }

                // The ReadNextBody method will actually end up re-using the
                // buffer, so within this closure, we can continue to use
                // rawMsg as it's just a slice into the buf from the buffer
                // pool.
                rawMsg, readErr := noiseConn.ReadNextBody(buf[:pktLen])
                if readErr != nil {
                        return fmt.Errorf("read next body: %w", readErr)
                }
                msgLen = uint64(len(rawMsg))

                // Next, create a new io.Reader implementation from the raw
                // message, and use this to decode the message directly from.
                msgReader := bytes.NewReader(rawMsg)
                nextMsg, err = lnwire.ReadMessage(msgReader, 0)
                if err != nil {
                        return err
                }

                // At this point, rawMsg and buf will be returned back to the
                // buffer pool for re-use.
                return nil
        })
        atomic.AddUint64(&p.bytesReceived, msgLen)
        if err != nil {
                return nil, err
        }

        p.logWireMessage(nextMsg, true)

        return nextMsg, nil
}

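// Wire framing recap (illustrative): every brontide packet is a length header
// followed by an encrypted body, so a full read is always two steps,
// mirroring the calls above:
//
//	pktLen, err := noiseConn.ReadNextHeader()
//	// ... handle err ...
//	rawMsg, err := noiseConn.ReadNextBody(buf[:pktLen])
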
// msgStream implements a goroutine-safe, in-order stream of messages to be
// delivered via closure to a receiver. These messages MUST be in order due to
// the nature of the lightning channel commitment and gossiper state machines.
// TODO(conner): use stream handler interface to abstract out stream
// state/logging.
type msgStream struct {
        streamShutdown int32 // To be used atomically.

        peer *Brontide

        apply func(lnwire.Message)

        startMsg string
        stopMsg  string

        msgCond *sync.Cond
        msgs    []lnwire.Message

        mtx sync.Mutex

        producerSema chan struct{}

        wg   sync.WaitGroup
        quit chan struct{}
}

// newMsgStream creates a new instance of a msgStream. bufSize is the max
// number of messages that should be buffered in the internal queue. Callers
// should set this to a sane value that avoids blocking unnecessarily, but
// doesn't allow an unbounded amount of memory to be allocated to buffer
// incoming messages.
func newMsgStream(p *Brontide, startMsg, stopMsg string, bufSize uint32,
        apply func(lnwire.Message)) *msgStream {

        stream := &msgStream{
                peer:         p,
                apply:        apply,
                startMsg:     startMsg,
                stopMsg:      stopMsg,
                producerSema: make(chan struct{}, bufSize),
                quit:         make(chan struct{}),
        }
        stream.msgCond = sync.NewCond(&stream.mtx)

        // Before we return the active stream, we'll populate the producer's
        // semaphore channel. We'll use this to ensure that the producer won't
        // attempt to allocate memory in the queue for an item until it has
        // sufficient extra space.
        for i := uint32(0); i < bufSize; i++ {
                stream.producerSema <- struct{}{}
        }

        return stream
}

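// A minimal usage sketch of msgStream (bufSize and apply are caller-chosen;
// see newChanMsgStream and newDiscMsgStream below for the real
// constructions):
//
//	stream := newMsgStream(p, "stream started", "stream stopped",
//		msgStreamSize, func(msg lnwire.Message) { /* consume */ })
//	stream.Start()
//	defer stream.Stop()
//	stream.AddMsg(msg)
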
// Start starts the msgStream.
func (ms *msgStream) Start() {
        ms.wg.Add(1)
        go ms.msgConsumer()
}

// Stop stops the msgStream.
func (ms *msgStream) Stop() {
        // TODO(roasbeef): signal too?

        close(ms.quit)

        // Now that we've closed the channel, we'll repeatedly signal the msg
        // consumer until we've detected that it has exited.
        for atomic.LoadInt32(&ms.streamShutdown) == 0 {
                ms.msgCond.Signal()
                time.Sleep(time.Millisecond * 100)
        }

        ms.wg.Wait()
}

// msgConsumer is the main goroutine that streams messages from the peer's
// readHandler directly to the target channel.
func (ms *msgStream) msgConsumer() {
        defer ms.wg.Done()
        defer peerLog.Tracef(ms.stopMsg)
        defer atomic.StoreInt32(&ms.streamShutdown, 1)

        peerLog.Tracef(ms.startMsg)

        for {
                // First, we'll check our condition. If the queue of messages
                // is empty, then we'll wait until a new item is added.
                ms.msgCond.L.Lock()
                for len(ms.msgs) == 0 {
                        ms.msgCond.Wait()

                        // If we woke up in order to exit, then we'll do so.
                        // Otherwise, we'll check the message queue for any new
                        // items.
                        select {
                        case <-ms.peer.cg.Done():
                                ms.msgCond.L.Unlock()
                                return
                        case <-ms.quit:
                                ms.msgCond.L.Unlock()
                                return
                        default:
                        }
                }

                // Grab the message off the front of the queue, shifting the
                // slice's reference down one in order to remove the message
                // from the queue.
                msg := ms.msgs[0]
                ms.msgs[0] = nil // Set to nil to prevent GC leak.
                ms.msgs = ms.msgs[1:]

                ms.msgCond.L.Unlock()

                ms.apply(msg)

                // We've just successfully processed an item, so we'll signal
                // to the producer that a new slot has opened up in the buffer.
                // We'll use this to bound the size of the buffer to avoid
                // allowing it to grow indefinitely.
                select {
                case ms.producerSema <- struct{}{}:
                case <-ms.peer.cg.Done():
                        return
                case <-ms.quit:
                        return
                }
        }
}

// AddMsg adds a new message to the msgStream. This function is safe for
// concurrent access.
func (ms *msgStream) AddMsg(msg lnwire.Message) {
        // First, we'll attempt to receive from the producerSema struct. This
        // acts as a semaphore to prevent us from indefinitely buffering
        // incoming items from the wire. Either the msg queue isn't full, and
        // we'll not block, or the queue is full, and we'll block until either
        // we're signalled to quit, or a slot is freed up.
        select {
        case <-ms.producerSema:
        case <-ms.peer.cg.Done():
                return
        case <-ms.quit:
                return
        }

        // Next, we'll lock the condition, and add the message to the end of
        // the message queue.
        ms.msgCond.L.Lock()
        ms.msgs = append(ms.msgs, msg)
        ms.msgCond.L.Unlock()

        // With the message added, we signal to the msgConsumer that there are
        // additional messages to consume.
        ms.msgCond.Signal()
}

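// Together, AddMsg and msgConsumer form a bounded producer/consumer pair:
// producerSema starts with bufSize tokens, AddMsg takes one token per
// enqueued message, and msgConsumer returns one per applied message, so at
// most bufSize messages are ever buffered at once.
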
// waitUntilLinkActive waits until the target link is active and returns a
// ChannelLink to pass messages to. It accomplishes this by subscribing to
// an ActiveLinkEvent which is emitted by the link when it first starts up.
func waitUntilLinkActive(p *Brontide,
        cid lnwire.ChannelID) htlcswitch.ChannelUpdateHandler {

        p.log.Tracef("Waiting for link=%v to be active", cid)

        // Subscribe to receive channel events.
        //
        // NOTE: If the link is already active by SubscribeChannelEvents, then
        // GetLink will retrieve the link and we can send messages. If the link
        // becomes active between SubscribeChannelEvents and GetLink, then GetLink
        // will retrieve the link. If the link becomes active after GetLink, then
        // we will get an ActiveLinkEvent notification and retrieve the link. If
        // the call to GetLink is before SubscribeChannelEvents, however, there
        // will be a race condition.
        sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents()
        if err != nil {
                // If we have a non-nil error, then the server is shutting down and we
                // can exit here and return nil. This means no message will be delivered
                // to the link.
                return nil
        }
        defer sub.Cancel()

        // The link may already be active by this point, and we may have missed the
        // ActiveLinkEvent. Check if the link exists.
        link := p.fetchLinkFromKeyAndCid(cid)
        if link != nil {
                return link
        }

        // If the link is nil, we must wait for it to be active.
        for {
                select {
                // A new event has been sent by the ChannelNotifier. We first check
                // whether the event is an ActiveLinkEvent. If it is, we'll check
                // that the event is for this channel. Otherwise, we discard the
                // message.
                case e := <-sub.Updates():
                        event, ok := e.(channelnotifier.ActiveLinkEvent)
                        if !ok {
                                // Ignore this notification.
                                continue
                        }

                        chanPoint := event.ChannelPoint

                        // Check whether the retrieved chanPoint matches the target
                        // channel id.
                        if !cid.IsChanPoint(chanPoint) {
                                continue
                        }

                        // The link shouldn't be nil as we received an
                        // ActiveLinkEvent. If it is nil, we return nil and the
                        // calling function should catch it.
                        return p.fetchLinkFromKeyAndCid(cid)

                case <-p.cg.Done():
                        return nil
                }
        }
}

// newChanMsgStream is used to create a msgStream between the peer and a
// particular channel link in the htlcswitch. We utilize additional
// synchronization with the fundingManager to ensure we don't attempt to
// dispatch a message to a channel before it is fully active. A reference to the
// channel this stream forwards to is held in scope to prevent unnecessary
// lookups.
func newChanMsgStream(p *Brontide, cid lnwire.ChannelID) *msgStream {
        var chanLink htlcswitch.ChannelUpdateHandler

        apply := func(msg lnwire.Message) {
                // This check is fine because if the link no longer exists, it will
                // be removed from the activeChannels map and subsequent messages
                // shouldn't reach the chan msg stream.
                if chanLink == nil {
                        chanLink = waitUntilLinkActive(p, cid)

                        // If the link is still not active and the calling function
                        // errored out, just return.
                        if chanLink == nil {
                                p.log.Warnf("Link=%v is not active", cid)
                                return
                        }
                }

                // In order to avoid unnecessarily delivering messages
                // as the peer is exiting, we'll check quickly to see
                // if we need to exit.
                select {
                case <-p.cg.Done():
                        return
                default:
                }

                chanLink.HandleChannelUpdate(msg)
        }

        return newMsgStream(p,
                fmt.Sprintf("Update stream for ChannelID(%x) created", cid[:]),
                fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]),
                msgStreamSize,
                apply,
        )
}

// newDiscMsgStream is used to set up a msgStream between the peer and the
// authenticated gossiper. This stream should be used to forward all remote
// channel announcements.
func newDiscMsgStream(p *Brontide) *msgStream {
        apply := func(msg lnwire.Message) {
                // TODO(elle): thread contexts through the peer system properly
                // so that a parent context can be passed in here.
                ctx := context.TODO()

                // Processing here means we send it to the gossiper which then
                // decides whether this message is processed immediately or
                // waits for dependent messages to be processed. It can also
                // happen that the message is not processed at all if it is
                // premature and the LRU cache fills up and the message is
                // deleted.
                p.log.Debugf("Processing remote msg %T", msg)

                // TODO(ziggie): ProcessRemoteAnnouncement returns an error
                // channel, but we cannot rely on it being written to,
                // because some messages might never be processed (e.g.
                // premature channel updates). We should change the design here
                // and use the actor model pattern as soon as it is available.
                // So for now we should NOT use the error channel.
                // See https://github.com/lightningnetwork/lnd/pull/9820.
                p.cfg.AuthGossiper.ProcessRemoteAnnouncement(ctx, msg, p)
        }

        return newMsgStream(
                p,
                "Update stream for gossiper created",
                "Update stream for gossiper exited",
                msgStreamSize,
                apply,
        )
}

// readHandler is responsible for reading messages off the wire in series, then
// properly dispatching the handling of the message to the proper subsystem.
//
// NOTE: This method MUST be run as a goroutine.
func (p *Brontide) readHandler() {
        defer p.cg.WgDone()

        // We'll stop the timer after a new message is received, and also
        // reset it after we process the next message.
        idleTimer := time.AfterFunc(idleTimeout, func() {
                err := fmt.Errorf("peer %s no answer for %s -- disconnecting",
                        p, idleTimeout)
                p.Disconnect(err)
        })

        // Initialize our negotiated gossip sync method before reading messages
        // off the wire. When using gossip queries, this ensures a gossip
        // syncer is active by the time query messages arrive.
        //
        // TODO(conner): have peer store gossip syncer directly and bypass
        // gossiper?
        p.initGossipSync()

        discStream := newDiscMsgStream(p)
        discStream.Start()
        defer discStream.Stop()
out:
        for atomic.LoadInt32(&p.disconnect) == 0 {
                nextMsg, err := p.readNextMessage()
                if !idleTimer.Stop() {
                        select {
                        case <-idleTimer.C:
                        default:
                        }
                }
                if err != nil {
                        p.log.Infof("unable to read message from peer: %v", err)

                        // If we could not read our peer's message due to an
                        // unknown type or invalid alias, we continue processing
                        // as normal. We store unknown message and address
                        // types, as they may provide debugging insight.
                        switch e := err.(type) {
                        // If this is just a message we don't yet recognize,
                        // we'll continue processing as normal as this allows
                        // us to introduce new messages in a forwards
                        // compatible manner.
                        case *lnwire.UnknownMessage:
                                p.storeError(e)
                                idleTimer.Reset(idleTimeout)
                                continue

                        // If they sent us an address type that we don't yet
                        // know of, then this isn't a wire error, so we'll
                        // simply continue parsing the remainder of their
                        // messages.
                        case *lnwire.ErrUnknownAddrType:
                                p.storeError(e)
                                idleTimer.Reset(idleTimeout)
                                continue

                        // If the NodeAnnouncement has an invalid alias, then
                        // we'll log that error above and continue so we can
                        // continue to read messages from the peer. We do not
                        // store this error because it is of little debugging
                        // value.
                        case *lnwire.ErrInvalidNodeAlias:
                                idleTimer.Reset(idleTimeout)
                                continue

                        // If the error we encountered wasn't just a message we
                        // didn't recognize, then we'll stop all processing as
                        // this is a fatal error.
                        default:
                                break out
                        }
                }

                // If a message router is active, then we'll try to have it
                // handle this message. If it can, then we're able to skip the
                // rest of the message handling logic.
                err = fn.MapOptionZ(p.msgRouter, func(r msgmux.Router) error {
                        return r.RouteMsg(msgmux.PeerMsg{
                                PeerPub: *p.IdentityKey(),
                                Message: nextMsg,
                        })
                })

                // No error occurred, and the message was handled by the
                // router.
                if err == nil {
                        continue
                }

                var (
                        targetChan   lnwire.ChannelID
                        isLinkUpdate bool
                )

                switch msg := nextMsg.(type) {
                case *lnwire.Pong:
                        // When we receive a Pong message in response to our
                        // last ping message, we send it to the pingManager.
                        p.pingManager.ReceivedPong(msg)

                case *lnwire.Ping:
                        // First, we'll store their latest ping payload within
                        // the relevant atomic variable.
                        p.lastPingPayload.Store(msg.PaddingBytes[:])

                        // Next, we'll send over the amount of specified pong
                        // bytes.
                        pong := lnwire.NewPong(p.cfg.PongBuf[0:msg.NumPongBytes])
                        p.queueMsg(pong, nil)

                case *lnwire.OpenChannel,
                        *lnwire.AcceptChannel,
                        *lnwire.FundingCreated,
                        *lnwire.FundingSigned,
                        *lnwire.ChannelReady:

                        p.cfg.FundingManager.ProcessFundingMsg(msg, p)

                case *lnwire.Shutdown:
                        select {
                        case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
                        case <-p.cg.Done():
                                break out
                        }
                case *lnwire.ClosingSigned:
                        select {
                        case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
                        case <-p.cg.Done():
                                break out
                        }

                case *lnwire.Warning:
                        targetChan = msg.ChanID
                        isLinkUpdate = p.handleWarningOrError(targetChan, msg)

                case *lnwire.Error:
                        targetChan = msg.ChanID
                        isLinkUpdate = p.handleWarningOrError(targetChan, msg)

                case *lnwire.ChannelReestablish:
                        targetChan = msg.ChanID
                        isLinkUpdate = p.hasChannel(targetChan)

                        // If we failed to find the link in question, and the
                        // message received was a channel sync message, then
                        // this might be a peer trying to resync a closed
                        // channel. In this case we'll try to resend our last
                        // channel sync message, such that the peer can recover
                        // funds from the closed channel.
                        if !isLinkUpdate {
                                err := p.resendChanSyncMsg(targetChan)
                                if err != nil {
                                        // TODO(halseth): send error to peer?
                                        p.log.Errorf("resend failed: %v",
                                                err)
                                }
                        }

                // For messages that implement the LinkUpdater interface, we
                // will consider them as link updates and send them to
                // chanStream. These messages will be queued inside chanStream
                // if the channel is not active yet.
                case lnwire.LinkUpdater:
                        targetChan = msg.TargetChanID()
                        isLinkUpdate = p.hasChannel(targetChan)

                        // Log an error if we don't have this channel. This
                        // means the peer has sent us a message with an unknown
                        // channel ID.
                        if !isLinkUpdate {
                                p.log.Errorf("Unknown channel ID: %v found "+
                                        "in received msg=%s", targetChan,
                                        nextMsg.MsgType())
                        }

                case *lnwire.ChannelUpdate1,
                        *lnwire.ChannelAnnouncement1,
                        *lnwire.NodeAnnouncement,
                        *lnwire.AnnounceSignatures1,
                        *lnwire.GossipTimestampRange,
                        *lnwire.QueryShortChanIDs,
                        *lnwire.QueryChannelRange,
                        *lnwire.ReplyChannelRange,
                        *lnwire.ReplyShortChanIDsEnd:

                        discStream.AddMsg(msg)

                case *lnwire.Custom:
                        err := p.handleCustomMessage(msg)
                        if err != nil {
                                p.storeError(err)
                                p.log.Errorf("%v", err)
                        }

                default:
                        // If the message we received is unknown to us, store
                        // the type to track the failure.
                        err := fmt.Errorf("unknown message type %v received",
                                uint16(msg.MsgType()))
                        p.storeError(err)

                        p.log.Errorf("%v", err)
                }

                if isLinkUpdate {
                        // If this is a channel update, then we need to feed it
                        // into the channel's in-order message stream.
                        p.sendLinkUpdateMsg(targetChan, nextMsg)
                }

                idleTimer.Reset(idleTimeout)
        }

        p.Disconnect(errors.New("read handler closed"))

        p.log.Trace("readHandler for peer done")
}

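// readHandler is one of the peer's long-lived goroutines. A minimal sketch of
// how such a handler is typically launched during peer startup (illustrative
// only; the actual wiring lives in Start):
//
//	p.cg.WgAdd(1)
//	go p.readHandler()
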
// handleCustomMessage handles the given custom message if a handler is
2251
// registered.
2252
func (p *Brontide) handleCustomMessage(msg *lnwire.Custom) error {
4✔
2253
        if p.cfg.HandleCustomMessage == nil {
4✔
2254
                return fmt.Errorf("no custom message handler for "+
×
2255
                        "message type %v", uint16(msg.MsgType()))
×
2256
        }
×
2257

2258
        return p.cfg.HandleCustomMessage(p.PubKey(), msg)
4✔
2259
}
2260

// isLoadedFromDisk returns true if the provided channel ID is loaded from
// disk.
//
// NOTE: only returns true for pending channels.
func (p *Brontide) isLoadedFromDisk(chanID lnwire.ChannelID) bool {
	// If this is a newly added channel, no need to reestablish.
	_, added := p.addedChannels.Load(chanID)
	if added {
		return false
	}

	// Return false if the channel is unknown.
	channel, ok := p.activeChannels.Load(chanID)
	if !ok {
		return false
	}

	// During startup, we will use a nil value to mark a pending channel
	// that's loaded from disk.
	return channel == nil
}

// isActiveChannel returns true if the provided channel id is active, otherwise
// returns false.
func (p *Brontide) isActiveChannel(chanID lnwire.ChannelID) bool {
	// The channel would be nil if,
	// - the channel doesn't exist, or,
	// - the channel exists, but is pending. In this case, we don't
	//   consider this channel active.
	channel, _ := p.activeChannels.Load(chanID)

	return channel != nil
}

// isPendingChannel returns true if the provided channel ID is pending, and
// returns false if the channel is active or unknown.
func (p *Brontide) isPendingChannel(chanID lnwire.ChannelID) bool {
	// Return false if the channel is unknown.
	channel, ok := p.activeChannels.Load(chanID)
	if !ok {
		return false
	}

	return channel == nil
}

// hasChannel returns true if the peer has a pending/active channel specified
// by the channel ID.
func (p *Brontide) hasChannel(chanID lnwire.ChannelID) bool {
	_, ok := p.activeChannels.Load(chanID)
	return ok
}
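
// The four predicates above all read the same concurrent map and distinguish
// three states with a single lookup: a missing key means "unknown", a nil
// value means "pending", and a non-nil value means "active". A standalone
// sketch of that encoding using sync.Map and a placeholder value type (not
// lnd code):
//
//	type channelState struct{ /* ... */ }
//
//	var channels sync.Map // chanID -> *channelState, nil while pending
//
//	func state(chanID string) string {
//		v, ok := channels.Load(chanID)
//		switch {
//		case !ok:
//			return "unknown"
//		case v.(*channelState) == nil:
//			return "pending"
//		default:
//			return "active"
//		}
//	}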

// storeError stores an error in our peer's buffer of recent errors with the
// current timestamp. Errors are only stored if we have at least one active
// channel with the peer to mitigate a DoS vector where a peer costlessly
// connects to us and spams us with errors.
func (p *Brontide) storeError(err error) {
	var haveChannels bool

	p.activeChannels.Range(func(_ lnwire.ChannelID,
		channel *lnwallet.LightningChannel) bool {

		// Pending channels will be nil in the activeChannels map.
		if channel == nil {
			// Return true to continue the iteration.
			return true
		}

		haveChannels = true

		// Return false to break the iteration.
		return false
	})

	// If we do not have any active channels with the peer, we do not store
	// errors as a DoS mitigation.
	if !haveChannels {
		p.log.Trace("no channels with peer, not storing err")
		return
	}

	p.cfg.ErrorBuffer.Add(
		&TimestampedError{Timestamp: time.Now(), Error: err},
	)
}
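
// The ErrorBuffer used above bounds memory no matter how chatty the peer is.
// A minimal fixed-size ring holding the same TimestampedError shape would be
// enough (an illustrative sketch only; lnd ships its own buffer type):
//
//	type errorRing struct {
//		errs []*TimestampedError
//		next int
//	}
//
//	func newErrorRing(size int) *errorRing {
//		return &errorRing{errs: make([]*TimestampedError, size)}
//	}
//
//	func (r *errorRing) Add(e *TimestampedError) {
//		r.errs[r.next] = e
//		r.next = (r.next + 1) % len(r.errs)
//	}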

// handleWarningOrError processes a warning or error msg and returns true if
// msg should be forwarded to the associated channel link. False is returned if
// any necessary forwarding of msg was already handled by this method. If msg is
// an error from a peer with an active channel, we'll store it in memory.
//
// NOTE: This method should only be called from within the readHandler.
func (p *Brontide) handleWarningOrError(chanID lnwire.ChannelID,
	msg lnwire.Message) bool {

	if errMsg, ok := msg.(*lnwire.Error); ok {
		p.storeError(errMsg)
	}

	switch {
	// Connection-wide messages should be forwarded to all channel links
	// with this peer.
	case chanID == lnwire.ConnectionWideID:
		for _, chanStream := range p.activeMsgStreams {
			chanStream.AddMsg(msg)
		}

		return false

	// If the channel ID for the message corresponds to a pending channel,
	// then the funding manager will handle it.
	case p.cfg.FundingManager.IsPendingChannel(chanID, p):
		p.cfg.FundingManager.ProcessFundingMsg(msg, p)
		return false

	// If not, we hand the message to the channel link for this channel.
	case p.isActiveChannel(chanID):
		return true

	default:
		return false
	}
}

// messageSummary returns a human-readable string that summarizes an
// incoming/outgoing message. Not all messages will have a summary, only those
// which have additional data that can be informative at a glance.
func messageSummary(msg lnwire.Message) string {
	switch msg := msg.(type) {
	case *lnwire.Init:
		// No summary.
		return ""

	case *lnwire.OpenChannel:
		return fmt.Sprintf("temp_chan_id=%x, chain=%v, csv=%v, amt=%v, "+
			"push_amt=%v, reserve=%v, flags=%v",
			msg.PendingChannelID[:], msg.ChainHash,
			msg.CsvDelay, msg.FundingAmount, msg.PushAmount,
			msg.ChannelReserve, msg.ChannelFlags)

	case *lnwire.AcceptChannel:
		return fmt.Sprintf("temp_chan_id=%x, reserve=%v, csv=%v, num_confs=%v",
			msg.PendingChannelID[:], msg.ChannelReserve, msg.CsvDelay,
			msg.MinAcceptDepth)

	case *lnwire.FundingCreated:
		return fmt.Sprintf("temp_chan_id=%x, chan_point=%v",
			msg.PendingChannelID[:], msg.FundingPoint)

	case *lnwire.FundingSigned:
		return fmt.Sprintf("chan_id=%v", msg.ChanID)

	case *lnwire.ChannelReady:
		return fmt.Sprintf("chan_id=%v, next_point=%x",
			msg.ChanID, msg.NextPerCommitmentPoint.SerializeCompressed())

	case *lnwire.Shutdown:
		return fmt.Sprintf("chan_id=%v, script=%x", msg.ChannelID,
			msg.Address[:])

	case *lnwire.ClosingComplete:
		return fmt.Sprintf("chan_id=%v, fee_sat=%v, locktime=%v",
			msg.ChannelID, msg.FeeSatoshis, msg.LockTime)

	case *lnwire.ClosingSig:
		return fmt.Sprintf("chan_id=%v", msg.ChannelID)

	case *lnwire.ClosingSigned:
		return fmt.Sprintf("chan_id=%v, fee_sat=%v", msg.ChannelID,
			msg.FeeSatoshis)

	case *lnwire.UpdateAddHTLC:
		var blindingPoint []byte
		msg.BlindingPoint.WhenSome(
			func(b tlv.RecordT[lnwire.BlindingPointTlvType,
				*btcec.PublicKey]) {

				blindingPoint = b.Val.SerializeCompressed()
			},
		)

		return fmt.Sprintf("chan_id=%v, id=%v, amt=%v, expiry=%v, "+
			"hash=%x, blinding_point=%x, custom_records=%v",
			msg.ChanID, msg.ID, msg.Amount, msg.Expiry,
			msg.PaymentHash[:], blindingPoint, msg.CustomRecords)

	case *lnwire.UpdateFailHTLC:
		return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID,
			msg.ID, msg.Reason)

	case *lnwire.UpdateFulfillHTLC:
		return fmt.Sprintf("chan_id=%v, id=%v, preimage=%x, "+
			"custom_records=%v", msg.ChanID, msg.ID,
			msg.PaymentPreimage[:], msg.CustomRecords)

	case *lnwire.CommitSig:
		return fmt.Sprintf("chan_id=%v, num_htlcs=%v", msg.ChanID,
			len(msg.HtlcSigs))

	case *lnwire.RevokeAndAck:
		return fmt.Sprintf("chan_id=%v, rev=%x, next_point=%x",
			msg.ChanID, msg.Revocation[:],
			msg.NextRevocationKey.SerializeCompressed())

	case *lnwire.UpdateFailMalformedHTLC:
		return fmt.Sprintf("chan_id=%v, id=%v, fail_code=%v",
			msg.ChanID, msg.ID, msg.FailureCode)

	case *lnwire.Warning:
		return fmt.Sprintf("%v", msg.Warning())

	case *lnwire.Error:
		return fmt.Sprintf("%v", msg.Error())

	case *lnwire.AnnounceSignatures1:
		return fmt.Sprintf("chan_id=%v, short_chan_id=%v", msg.ChannelID,
			msg.ShortChannelID.ToUint64())

	case *lnwire.ChannelAnnouncement1:
		return fmt.Sprintf("chain_hash=%v, short_chan_id=%v",
			msg.ChainHash, msg.ShortChannelID.ToUint64())

	case *lnwire.ChannelUpdate1:
		return fmt.Sprintf("chain_hash=%v, short_chan_id=%v, "+
			"mflags=%v, cflags=%v, update_time=%v", msg.ChainHash,
			msg.ShortChannelID.ToUint64(), msg.MessageFlags,
			msg.ChannelFlags, time.Unix(int64(msg.Timestamp), 0))

	case *lnwire.NodeAnnouncement:
		return fmt.Sprintf("node=%x, update_time=%v",
			msg.NodeID, time.Unix(int64(msg.Timestamp), 0))

	case *lnwire.Ping:
		return fmt.Sprintf("ping_bytes=%x", msg.PaddingBytes[:])

	case *lnwire.Pong:
		return fmt.Sprintf("len(pong_bytes)=%d", len(msg.PongBytes[:]))

	case *lnwire.UpdateFee:
		return fmt.Sprintf("chan_id=%v, fee_update_sat=%v",
			msg.ChanID, int64(msg.FeePerKw))

	case *lnwire.ChannelReestablish:
		return fmt.Sprintf("chan_id=%v, next_local_height=%v, "+
			"remote_tail_height=%v", msg.ChanID,
			msg.NextLocalCommitHeight, msg.RemoteCommitTailHeight)

	case *lnwire.ReplyShortChanIDsEnd:
		return fmt.Sprintf("chain_hash=%v, complete=%v", msg.ChainHash,
			msg.Complete)

	case *lnwire.ReplyChannelRange:
		return fmt.Sprintf("start_height=%v, end_height=%v, "+
			"num_chans=%v, encoding=%v", msg.FirstBlockHeight,
			msg.LastBlockHeight(), len(msg.ShortChanIDs),
			msg.EncodingType)

	case *lnwire.QueryShortChanIDs:
		return fmt.Sprintf("chain_hash=%v, encoding=%v, num_chans=%v",
			msg.ChainHash, msg.EncodingType, len(msg.ShortChanIDs))

	case *lnwire.QueryChannelRange:
		return fmt.Sprintf("chain_hash=%v, start_height=%v, "+
			"end_height=%v", msg.ChainHash, msg.FirstBlockHeight,
			msg.LastBlockHeight())

	case *lnwire.GossipTimestampRange:
		return fmt.Sprintf("chain_hash=%v, first_stamp=%v, "+
			"stamp_range=%v", msg.ChainHash,
			time.Unix(int64(msg.FirstTimestamp), 0),
			msg.TimestampRange)

	case *lnwire.Stfu:
		return fmt.Sprintf("chan_id=%v, initiator=%v", msg.ChanID,
			msg.Initiator)

	case *lnwire.Custom:
		return fmt.Sprintf("type=%d", msg.Type)
	}

	return fmt.Sprintf("unknown msg type=%T", msg)
}

// logWireMessage logs the receipt or sending of a particular wire message. This
// function is used rather than just logging the message in order to produce
// less spammy log messages in trace mode by setting the 'Curve' parameter to
// nil. Doing this avoids printing out each of the field elements in the curve
// parameters for secp256k1.
func (p *Brontide) logWireMessage(msg lnwire.Message, read bool) {
	summaryPrefix := "Received"
	if !read {
		summaryPrefix = "Sending"
	}

	p.log.Debugf("%v", lnutils.NewLogClosure(func() string {
		// Debug summary of message.
		summary := messageSummary(msg)
		if len(summary) > 0 {
			summary = "(" + summary + ")"
		}

		preposition := "to"
		if read {
			preposition = "from"
		}

		var msgType string
		if msg.MsgType() < lnwire.CustomTypeStart {
			msgType = msg.MsgType().String()
		} else {
			msgType = "custom"
		}

		return fmt.Sprintf("%v %v%s %v %s", summaryPrefix,
			msgType, summary, preposition, p)
	}))

	prefix := "readMessage from peer"
	if !read {
		prefix = "writeMessage to peer"
	}

	p.log.Tracef(prefix+": %v", lnutils.SpewLogClosure(msg))
}
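
// The closures handed to Debugf above defer the cost of building the summary
// until the logger actually formats the message, so disabled log levels pay
// almost nothing. The pattern only requires a type that implements
// fmt.Stringer; a free-standing sketch (not the lnutils implementation):
//
//	type logClosure func() string
//
//	func (c logClosure) String() string { return c() }
//
//	// log.Debugf("%v", logClosure(func() string {
//	//	return expensiveSummary(msg)
//	// }))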

// writeMessage writes and flushes the target lnwire.Message to the remote peer.
// If the passed message is nil, this method will only try to flush an existing
// message buffered on the connection. It is safe to call this method again
// with a nil message iff a timeout error is returned. This will continue to
// flush the pending message to the wire.
//
// NOTE:
// Besides its usage in Start, this function should not be used elsewhere
// except in writeHandler. If multiple goroutines call writeMessage at the same
// time, panics can occur because WriteMessage and Flush don't use any locking
// internally.
func (p *Brontide) writeMessage(msg lnwire.Message) error {
	// Only log the message on the first attempt.
	if msg != nil {
		p.logWireMessage(msg, false)
	}

	noiseConn := p.cfg.Conn

	flushMsg := func() error {
		// Ensure the write deadline is set before we attempt to send
		// the message.
		writeDeadline := time.Now().Add(
			p.scaleTimeout(writeMessageTimeout),
		)
		err := noiseConn.SetWriteDeadline(writeDeadline)
		if err != nil {
			return err
		}

		// Flush the pending message to the wire. If an error is
		// encountered, e.g. write timeout, the number of bytes written
		// so far will be returned.
		n, err := noiseConn.Flush()

		// Record the number of bytes written on the wire, if any.
		if n > 0 {
			atomic.AddUint64(&p.bytesSent, uint64(n))
		}

		return err
	}

	// If the current message has already been serialized, encrypted, and
	// buffered on the underlying connection we will skip straight to
	// flushing it to the wire.
	if msg == nil {
		return flushMsg()
	}

	// Otherwise, this is a new message. We'll acquire a write buffer to
	// serialize the message and buffer the ciphertext on the connection.
	err := p.cfg.WritePool.Submit(func(buf *bytes.Buffer) error {
		// Using a buffer allocated by the write pool, encode the
		// message directly into the buffer.
		_, writeErr := lnwire.WriteMessage(buf, msg, 0)
		if writeErr != nil {
			return writeErr
		}

		// Finally, write the message itself in a single swoop. This
		// will buffer the ciphertext on the underlying connection. We
		// will defer flushing the message until the write pool has been
		// released.
		return noiseConn.WriteMessage(buf.Bytes())
	})
	if err != nil {
		return err
	}

	return flushMsg()
}
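
// The nil-message contract documented above gives callers a cheap retry path:
// after a timeout the ciphertext is already buffered on the connection, so a
// follow-up call with a nil message only re-arms the deadline and flushes. A
// sketch of the calling pattern (writeHandler below does the same thing with
// a goto):
//
//	err := p.writeMessage(msg)
//	for {
//		nerr, ok := err.(net.Error)
//		if !ok || !nerr.Timeout() {
//			break
//		}
//
//		// Timed out: the bytes are buffered, so retry flush-only.
//		err = p.writeMessage(nil)
//	}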

// writeHandler is a goroutine dedicated to reading messages off of an incoming
// queue, and writing them out to the wire. This goroutine coordinates with the
// queueHandler in order to ensure the incoming message queue is quickly
// drained.
//
// NOTE: This method MUST be run as a goroutine.
func (p *Brontide) writeHandler() {
	// We'll stop the timer after a new message is sent, and also reset it
	// after we process the next message.
	idleTimer := time.AfterFunc(idleTimeout, func() {
		err := fmt.Errorf("peer %s no write for %s -- disconnecting",
			p, idleTimeout)
		p.Disconnect(err)
	})

	var exitErr error

out:
	for {
		select {
		case outMsg := <-p.sendQueue:
			// Record the time at which we first attempt to send the
			// message.
			startTime := time.Now()

		retry:
			// Write out the message to the socket. If a timeout
			// error is encountered, we will catch this and retry
			// after backing off in case the remote peer is just
			// slow to process messages from the wire.
			err := p.writeMessage(outMsg.msg)
			if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
				p.log.Debugf("Write timeout detected for "+
					"peer, first write for message "+
					"attempted %v ago",
					time.Since(startTime))

				// If we received a timeout error, this implies
				// that the message was buffered on the
				// connection successfully and that a flush was
				// attempted. We'll set the message to nil so
				// that on a subsequent pass we only try to
				// flush the buffered message, and forgo
				// reserializing or reencrypting it.
				outMsg.msg = nil

				goto retry
			}

			// The write succeeded, reset the idle timer to prevent
			// us from disconnecting the peer.
			if !idleTimer.Stop() {
				select {
				case <-idleTimer.C:
				default:
				}
			}
			idleTimer.Reset(idleTimeout)

			// If the peer requested a synchronous write, respond
			// with the error.
			if outMsg.errChan != nil {
				outMsg.errChan <- err
			}

			if err != nil {
				exitErr = fmt.Errorf("unable to write "+
					"message: %v", err)
				break out
			}

		case <-p.cg.Done():
			exitErr = lnpeer.ErrPeerExiting
			break out
		}
	}

	// Avoid an exit deadlock by ensuring WaitGroups are decremented before
	// disconnect.
	p.cg.WgDone()

	p.Disconnect(exitErr)

	p.log.Trace("writeHandler for peer done")
}
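
// Re-arming idleTimer above uses the classic Stop/drain/Reset idiom. For a
// timer created with time.AfterFunc the C field is nil, so the drain select
// simply falls through to its default case; with time.NewTimer the drain is
// what prevents a stale, already-fired tick from making the next wait return
// immediately. A self-contained sketch using only the standard library:
//
//	t := time.NewTimer(idleTimeout)
//
//	resetIdle := func() {
//		if !t.Stop() {
//			// The timer already fired; consume the stale tick so
//			// Reset starts from a clean state.
//			select {
//			case <-t.C:
//			default:
//			}
//		}
//		t.Reset(idleTimeout)
//	}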

// queueHandler is responsible for accepting messages from outside subsystems
// to be eventually sent out on the wire by the writeHandler.
//
// NOTE: This method MUST be run as a goroutine.
func (p *Brontide) queueHandler() {
	defer p.cg.WgDone()

	// priorityMsgs holds an in-order list of messages deemed high-priority
	// to be added to the sendQueue. This predominantly includes messages
	// from the funding manager and htlcswitch.
	priorityMsgs := list.New()

	// lazyMsgs holds an in-order list of messages deemed low-priority to be
	// added to the sendQueue only after all high-priority messages have
	// been queued. This predominantly includes messages from the gossiper.
	lazyMsgs := list.New()

	for {
		// Examine the front of the priority queue; if it is empty,
		// check the low priority queue.
		elem := priorityMsgs.Front()
		if elem == nil {
			elem = lazyMsgs.Front()
		}

		if elem != nil {
			front := elem.Value.(outgoingMsg)

			// There's an element on the queue, try adding
			// it to the sendQueue. We also watch for
			// messages on the outgoingQueue, in case the
			// writeHandler cannot accept messages on the
			// sendQueue.
			select {
			case p.sendQueue <- front:
				if front.priority {
					priorityMsgs.Remove(elem)
				} else {
					lazyMsgs.Remove(elem)
				}
			case msg := <-p.outgoingQueue:
				if msg.priority {
					priorityMsgs.PushBack(msg)
				} else {
					lazyMsgs.PushBack(msg)
				}
			case <-p.cg.Done():
				return
			}
		} else {
			// If there weren't any messages to send to the
			// writeHandler, then we'll accept a new message
			// into the queue from outside sub-systems.
			select {
			case msg := <-p.outgoingQueue:
				if msg.priority {
					priorityMsgs.PushBack(msg)
				} else {
					lazyMsgs.PushBack(msg)
				}
			case <-p.cg.Done():
				return
			}
		}
	}
}
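
// queueHandler above is a small scheduler: two FIFO lists feed one outbound
// channel, priority messages always drain first, and new submissions are
// accepted even while the writer is busy. The core shape, reduced to a
// single queue of strings (illustrative only; requires container/list):
//
//	func schedule(in <-chan string, out chan<- string,
//		quit <-chan struct{}) {
//
//		pending := list.New()
//		for {
//			if front := pending.Front(); front != nil {
//				select {
//				case out <- front.Value.(string):
//					pending.Remove(front)
//				case m := <-in:
//					pending.PushBack(m)
//				case <-quit:
//					return
//				}
//
//				continue
//			}
//
//			select {
//			case m := <-in:
//				pending.PushBack(m)
//			case <-quit:
//				return
//			}
//		}
//	}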

// PingTime returns the estimated ping time to the peer in microseconds.
func (p *Brontide) PingTime() int64 {
	return p.pingManager.GetPingTimeMicroSeconds()
}

// queueMsg adds the lnwire.Message to the back of the high priority send queue.
// If the errChan is non-nil, an error is sent back if the msg failed to queue
// or failed to write, and nil otherwise.
func (p *Brontide) queueMsg(msg lnwire.Message, errChan chan error) {
	p.queue(true, msg, errChan)
}

// queueMsgLazy adds the lnwire.Message to the back of the low priority send
// queue. If the errChan is non-nil, an error is sent back if the msg failed to
// queue or failed to write, and nil otherwise.
func (p *Brontide) queueMsgLazy(msg lnwire.Message, errChan chan error) {
	p.queue(false, msg, errChan)
}

// queue sends a given message to the queueHandler using the passed priority. If
// the errChan is non-nil, an error is sent back if the msg failed to queue or
// failed to write, and nil otherwise.
func (p *Brontide) queue(priority bool, msg lnwire.Message,
	errChan chan error) {

	select {
	case p.outgoingQueue <- outgoingMsg{priority, msg, errChan}:
	case <-p.cg.Done():
		p.log.Tracef("Peer shutting down, could not enqueue msg: %v.",
			spew.Sdump(msg))
		if errChan != nil {
			errChan <- lnpeer.ErrPeerExiting
		}
	}
}
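
// Passing a non-nil errChan turns the fire-and-forget queue into a
// synchronous send: the writeHandler replies on the channel once the write
// completes or the peer exits. A typical call-site sketch, using a buffered
// channel so a late reply can never block the writer:
//
//	errChan := make(chan error, 1)
//	p.queueMsg(msg, errChan)
//
//	select {
//	case err := <-errChan:
//		if err != nil {
//			// The message never made it onto the wire.
//		}
//	case <-time.After(30 * time.Second):
//		// Stop waiting; any late reply lands in the buffer.
//	}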

// ChannelSnapshots returns a slice of channel snapshots detailing all
// currently active channels maintained with the remote peer.
func (p *Brontide) ChannelSnapshots() []*channeldb.ChannelSnapshot {
	snapshots := make(
		[]*channeldb.ChannelSnapshot, 0, p.activeChannels.Len(),
	)

	p.activeChannels.ForEach(func(_ lnwire.ChannelID,
		activeChan *lnwallet.LightningChannel) error {

		// If the activeChan is nil, then we skip it as the channel is
		// pending.
		if activeChan == nil {
			return nil
		}

		// We'll only return a snapshot for channels that are
		// *immediately* available for routing payments over.
		if activeChan.RemoteNextRevocation() == nil {
			return nil
		}

		snapshot := activeChan.StateSnapshot()
		snapshots = append(snapshots, snapshot)

		return nil
	})

	return snapshots
}

// genDeliveryScript returns a new script to be used to send our funds to in
// the case of a cooperative channel close negotiation.
func (p *Brontide) genDeliveryScript() ([]byte, error) {
	// We'll send a normal p2wkh address unless we've negotiated the
	// shutdown-any-segwit feature.
	addrType := lnwallet.WitnessPubKey
	if p.taprootShutdownAllowed() {
		addrType = lnwallet.TaprootPubkey
	}

	deliveryAddr, err := p.cfg.Wallet.NewAddress(
		addrType, false, lnwallet.DefaultAccountName,
	)
	if err != nil {
		return nil, err
	}
	p.log.Infof("Delivery addr for channel close: %v",
		deliveryAddr)

	return txscript.PayToAddrScript(deliveryAddr)
}

// channelManager is a goroutine dedicated to handling all requests/signals
// pertaining to the opening, cooperative closing, and force closing of all
// channels maintained with the remote peer.
//
// NOTE: This method MUST be run as a goroutine.
func (p *Brontide) channelManager() {
	defer p.cg.WgDone()

	// reenableTimeout will fire once after the configured channel status
	// interval has elapsed. This will trigger us to sign new channel
	// updates and broadcast them with the "disabled" flag unset.
	reenableTimeout := time.After(p.cfg.ChanActiveTimeout)

out:
	for {
		select {
		// A new pending channel has arrived, which means we are about
		// to complete a funding workflow and are waiting for the final
		// `ChannelReady` messages to be exchanged. We will add this
		// channel to the `activeChannels` with a nil value to indicate
		// this is a pending channel.
		case req := <-p.newPendingChannel:
			p.handleNewPendingChannel(req)

		// A new channel has arrived which means we've just completed a
		// funding workflow. We'll initialize the necessary local
		// state, and notify the htlc switch of a new link.
		case req := <-p.newActiveChannel:
			p.handleNewActiveChannel(req)

		// The funding flow for a pending channel has failed, so we
		// will remove it from Brontide.
		case req := <-p.removePendingChannel:
			p.handleRemovePendingChannel(req)

		// We've just received a local request to close an active
		// channel. It will either kick off a cooperative channel
		// closure negotiation, or be a notification of a breached
		// contract that should be abandoned.
		case req := <-p.localCloseChanReqs:
			p.handleLocalCloseReq(req)

		// We've received a link failure from a link that was added to
		// the switch. This will initiate the teardown of the link, and
		// initiate any on-chain closures if necessary.
		case failure := <-p.linkFailures:
			p.handleLinkFailure(failure)

		// We've received a new cooperative channel closure related
		// message from the remote peer, we'll use this message to
		// advance the chan closer state machine.
		case closeMsg := <-p.chanCloseMsgs:
			p.handleCloseMsg(closeMsg)

		// The channel reannounce delay has elapsed, broadcast the
		// reenabled channel updates to the network. This should only
		// fire once, so we set the reenableTimeout channel to nil to
		// mark it for garbage collection. If the peer is torn down
		// before firing, reenabling will not be attempted.
		// TODO(conner): consolidate reenables timers inside chan status
		// manager
		case <-reenableTimeout:
			p.reenableActiveChannels()

			// Since this channel will never fire again during the
			// lifecycle of the peer, we nil the channel to mark it
			// eligible for garbage collection, and make this
			// explicitly ineligible to receive in future calls to
			// select. This also shaves a few CPU cycles since the
			// select will ignore this case entirely.
			reenableTimeout = nil

			// Once the reenabling is attempted, we also cancel the
			// channel event subscription to free up the overflow
			// queue used in channel notifier.
			//
			// NOTE: channelEventClient will be nil if the
			// reenableTimeout is greater than 1 minute.
			if p.channelEventClient != nil {
				p.channelEventClient.Cancel()
			}

		case <-p.cg.Done():
			// As we've been signalled to exit, we'll reset all
			// our active channels back to their default state.
			p.activeChannels.ForEach(func(_ lnwire.ChannelID,
				lc *lnwallet.LightningChannel) error {

				// Exit if the channel is nil as it's a pending
				// channel.
				if lc == nil {
					return nil
				}

				lc.ResetState()

				return nil
			})

			break out
		}
	}
}
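
// Nil-ing reenableTimeout after it fires is a small but load-bearing Go
// idiom: a receive from a nil channel blocks forever, so the nil-ed case is
// effectively removed from the select. A compact demonstration (otherWork is
// a placeholder channel):
//
//	fireOnce := time.After(time.Second)
//	for {
//		select {
//		case <-fireOnce:
//			// Runs exactly once; the assignment below makes this
//			// case unselectable on every later iteration.
//			fireOnce = nil
//		case <-otherWork:
//			// Continues to be served as usual.
//		}
//	}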

// reenableActiveChannels searches the index of channels maintained with this
// peer, and reenables each public, non-pending channel. This is done at the
// gossip level by broadcasting a new ChannelUpdate with the disabled bit unset.
// No message will be sent if the channel is already enabled.
func (p *Brontide) reenableActiveChannels() {
	// First, filter all known channels with this peer for ones that are
	// both public and not pending.
	activePublicChans := p.filterChannelsToEnable()

	// Create a map to hold channels that need to be retried.
	retryChans := make(map[wire.OutPoint]struct{}, len(activePublicChans))

	// For each of the public, non-pending channels, set the channel
	// disabled bit to false and send out a new ChannelUpdate. If this
	// channel is already active, the update won't be sent.
	for _, chanPoint := range activePublicChans {
		err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint, false)

		switch {
		// No error occurred, continue to request the next channel.
		case err == nil:
			continue

		// Cannot auto enable a manually disabled channel so we do
		// nothing but proceed to the next channel.
		case errors.Is(err, netann.ErrEnableManuallyDisabledChan):
			p.log.Debugf("Channel(%v) was manually disabled, "+
				"ignoring automatic enable request", chanPoint)

			continue

		// If the channel is reported as inactive, we will give it
		// another chance. When handling the request, ChanStatusManager
		// will check whether the link is active or not. One of the
		// conditions is whether the link has been marked as
		// reestablished, which happens inside a goroutine (htlcManager)
		// after the link is started. And we may get a false negative
		// saying the link is not active because that goroutine hasn't
		// reached the line to mark the reestablishment. Thus we give
		// it a second chance to send the request.
		case errors.Is(err, netann.ErrEnableInactiveChan):
			// If we don't have a client created, it means we
			// shouldn't retry enabling the channel.
			if p.channelEventClient == nil {
				p.log.Errorf("Channel(%v) request enabling "+
					"failed due to inactive link",
					chanPoint)

				continue
			}

			p.log.Warnf("Channel(%v) cannot be enabled as "+
				"ChanStatusManager reported inactive, retrying",
				chanPoint)

			// Add the channel to the retry map.
			retryChans[chanPoint] = struct{}{}
		}
	}

	// Retry the channels if we have any.
	if len(retryChans) != 0 {
		p.retryRequestEnable(retryChans)
	}
}

// fetchActiveChanCloser attempts to fetch the active chan closer state machine
// for the target channel ID. If the channel isn't active an error is returned.
// Otherwise, either an existing state machine will be returned, or a new one
// will be created.
func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
	*chanCloserFsm, error) {

	chanCloser, found := p.activeChanCloses.Load(chanID)
	if found {
		// An entry will only be found if the closer has already been
		// created for a non-pending channel or for a channel that had
		// previously started the shutdown process but the connection
		// was restarted.
		return &chanCloser, nil
	}

	// First, we'll ensure that we actually know of the target channel. If
	// not, we'll ignore this message.
	channel, ok := p.activeChannels.Load(chanID)

	// If the channel isn't in the map or the channel is nil, return
	// ErrChannelNotFound as the channel is pending.
	if !ok || channel == nil {
		return nil, ErrChannelNotFound
	}

	// We'll create a valid closing state machine in order to respond to
	// the initiated cooperative channel closure. First, we set the
	// delivery script that our funds will be paid out to. If an upfront
	// shutdown script was set, we will use it. Otherwise, we get a fresh
	// delivery script.
	//
	// TODO: Expose option to allow upfront shutdown script from watch-only
	// accounts.
	deliveryScript := channel.LocalUpfrontShutdownScript()
	if len(deliveryScript) == 0 {
		var err error
		deliveryScript, err = p.genDeliveryScript()
		if err != nil {
			p.log.Errorf("unable to gen delivery script: %v",
				err)
			return nil, fmt.Errorf("close addr unavailable")
		}
	}

	// In order to begin fee negotiations, we'll first compute our target
	// ideal fee-per-kw.
	feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
		p.cfg.CoopCloseTargetConfs,
	)
	if err != nil {
		p.log.Errorf("unable to query fee estimator: %v", err)
		return nil, fmt.Errorf("unable to estimate fee")
	}

	addr, err := p.addrWithInternalKey(deliveryScript)
	if err != nil {
		return nil, fmt.Errorf("unable to parse addr: %w", err)
	}
	negotiateChanCloser, err := p.createChanCloser(
		channel, addr, feePerKw, nil, lntypes.Remote,
	)
	if err != nil {
		p.log.Errorf("unable to create chan closer: %v", err)
		return nil, fmt.Errorf("unable to create chan closer")
	}

	chanCloser = makeNegotiateCloser(negotiateChanCloser)

	p.activeChanCloses.Store(chanID, chanCloser)

	return &chanCloser, nil
}

3145
// filterChannelsToEnable filters a list of channels to be enabled upon start.
3146
// The filtered channels are active channels that's neither private nor
3147
// pending.
3148
func (p *Brontide) filterChannelsToEnable() []wire.OutPoint {
3✔
3149
        var activePublicChans []wire.OutPoint
3✔
3150

3✔
3151
        p.activeChannels.Range(func(chanID lnwire.ChannelID,
3✔
3152
                lnChan *lnwallet.LightningChannel) bool {
6✔
3153

3✔
3154
                // If the lnChan is nil, continue as this is a pending channel.
3✔
3155
                if lnChan == nil {
5✔
3156
                        return true
2✔
3157
                }
2✔
3158

3159
                dbChan := lnChan.State()
3✔
3160
                isPublic := dbChan.ChannelFlags&lnwire.FFAnnounceChannel != 0
3✔
3161
                if !isPublic || dbChan.IsPending {
3✔
3162
                        return true
×
3163
                }
×
3164

3165
                // We'll also skip any channels added during this peer's
3166
                // lifecycle since they haven't waited out the timeout. Their
3167
                // first announcement will be enabled, and the chan status
3168
                // manager will begin monitoring them passively since they exist
3169
                // in the database.
3170
                if _, ok := p.addedChannels.Load(chanID); ok {
3✔
UNCOV
3171
                        return true
×
UNCOV
3172
                }
×
3173

3174
                activePublicChans = append(
3✔
3175
                        activePublicChans, dbChan.FundingOutpoint,
3✔
3176
                )
3✔
3177

3✔
3178
                return true
3✔
3179
        })
3180

3181
        return activePublicChans
3✔
3182
}
3183

// retryRequestEnable takes a map of channel outpoints and listens to the
// peer's channel event client, removing a channel from the map once it's
// matched to an event. Upon receiving an active channel event, it will send
// the enabling request again.
func (p *Brontide) retryRequestEnable(activeChans map[wire.OutPoint]struct{}) {
	p.log.Debugf("Retry enabling %v channels", len(activeChans))

	// retryEnable is a helper closure that sends an enable request and
	// removes the channel from the map if it's matched.
	retryEnable := func(chanPoint wire.OutPoint) error {
		// If this is an active channel event, check whether it's in
		// our targeted channels map.
		_, found := activeChans[chanPoint]

		// If this channel is irrelevant, return nil so the loop can
		// jump to the next iteration.
		if !found {
			return nil
		}

		// Otherwise we've just received an active signal for a channel
		// that previously failed to be enabled, so we send the request
		// again.
		//
		// We only give the channel one more shot, so we delete it from
		// our map first to keep it from being attempted again.
		delete(activeChans, chanPoint)

		// Send the request.
		err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint, false)
		if err != nil {
			return fmt.Errorf("request enabling channel %v "+
				"failed: %w", chanPoint, err)
		}

		return nil
	}

	for {
		// If activeChans is empty, we're done processing all the
		// channels.
		if len(activeChans) == 0 {
			p.log.Debug("Finished retry enabling channels")
			return
		}

		select {
		// A new event has been sent by the ChannelNotifier. We now
		// check whether it's an active or inactive channel event.
		case e := <-p.channelEventClient.Updates():
			// If this is an active channel event, try enabling the
			// channel then jump to the next iteration.
			active, ok := e.(channelnotifier.ActiveChannelEvent)
			if ok {
				chanPoint := *active.ChannelPoint

				// If we received an error for this particular
				// channel, we log an error and won't quit as
				// we still want to retry other channels.
				if err := retryEnable(chanPoint); err != nil {
					p.log.Errorf("Retry failed: %v", err)
				}

				continue
			}

			// Otherwise check for an inactive link event, and jump
			// to the next iteration if it's not one.
			inactive, ok := e.(channelnotifier.InactiveLinkEvent)
			if !ok {
				continue
			}

			// Found an inactive link event; if this is our
			// targeted channel, remove it from our map.
			chanPoint := *inactive.ChannelPoint
			_, found := activeChans[chanPoint]
			if !found {
				continue
			}

			delete(activeChans, chanPoint)
			p.log.Warnf("Re-enable channel %v failed, received "+
				"inactive link event", chanPoint)

		case <-p.cg.Done():
			p.log.Debugf("Peer shutdown during retry enabling")
			return
		}
	}
}

// chooseDeliveryScript takes two optionally set shutdown scripts and returns
// a suitable script to close out to. This may be nil if neither script is
// set. If both scripts are set, this function will error if they do not match.
func chooseDeliveryScript(upfront, requested lnwire.DeliveryAddress,
	genDeliveryScript func() ([]byte, error),
) (lnwire.DeliveryAddress, error) {

	switch {
	// If no script was provided, then we'll generate a new delivery script.
	case len(upfront) == 0 && len(requested) == 0:
		return genDeliveryScript()

	// If no upfront shutdown script was provided, return the user
	// requested address (which may be nil).
	case len(upfront) == 0:
		return requested, nil

	// If an upfront shutdown script was provided, and the user did not
	// request a custom shutdown script, return the upfront address.
	case len(requested) == 0:
		return upfront, nil

	// If both an upfront shutdown script and a custom close script were
	// provided, error if the user provided shutdown script does not match
	// the upfront shutdown script (because closing out to a different
	// script would violate upfront shutdown).
	case !bytes.Equal(upfront, requested):
		return nil, chancloser.ErrUpfrontShutdownScriptMismatch

	// The user requested script matches the upfront shutdown script, so we
	// can return it without error.
	default:
		return upfront, nil
	}
}
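
// The four branches above collapse into a simple truth table. A usage sketch
// exercising each case (genScript stands in for genDeliveryScript; the
// script bytes are placeholders):
//
//	genScript := func() ([]byte, error) { return []byte{0xaa}, nil }
//
//	a := lnwire.DeliveryAddress{0x01}
//	b := lnwire.DeliveryAddress{0x02}
//
//	s, _ := chooseDeliveryScript(nil, nil, genScript) // freshly generated
//	s, _ = chooseDeliveryScript(nil, b, genScript)    // returns b
//	s, _ = chooseDeliveryScript(a, nil, genScript)    // returns a
//	s, _ = chooseDeliveryScript(a, a, genScript)      // returns a
//	_, err := chooseDeliveryScript(a, b, genScript)   // mismatch error
//	_, _ = s, err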

// restartCoopClose checks whether we need to restart the cooperative close
// process for a given channel.
func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) (
	*lnwire.Shutdown, error) {

	isTaprootChan := lnChan.ChanType().IsTaproot()

	// If this channel has status ChanStatusCoopBroadcasted and does not
	// have a closing transaction, then the cooperative close process was
	// started but never finished. We'll re-create the chanCloser state
	// machine and resend Shutdown. BOLT#2 requires that we retransmit
	// Shutdown exactly, but doing so would mean persisting the RPC
	// provided close script. Instead use the LocalUpfrontShutdownScript
	// or generate a script.
	c := lnChan.State()
	_, err := c.BroadcastedCooperative()
	if err != nil && err != channeldb.ErrNoCloseTx {
		// An error other than ErrNoCloseTx was encountered.
		return nil, err
	} else if err == nil && !p.rbfCoopCloseAllowed() {
		// This is a channel that doesn't support RBF coop close, and it
		// already had a coop close txn broadcast. As a result, we can
		// just exit here as all we can do is wait for it to confirm.
		return nil, nil
	}

	chanID := lnwire.NewChanIDFromOutPoint(c.FundingOutpoint)

	var deliveryScript []byte

	shutdownInfo, err := c.ShutdownInfo()
	switch {
	// We have previously stored the delivery script that we need to use
	// in the shutdown message. Re-use this script.
	case err == nil:
		shutdownInfo.WhenSome(func(info channeldb.ShutdownInfo) {
			deliveryScript = info.DeliveryScript.Val
		})

	// An error other than ErrNoShutdownInfo was returned.
	case !errors.Is(err, channeldb.ErrNoShutdownInfo):
		return nil, err

	case errors.Is(err, channeldb.ErrNoShutdownInfo):
		deliveryScript = c.LocalShutdownScript
		if len(deliveryScript) == 0 {
			var err error
			deliveryScript, err = p.genDeliveryScript()
			if err != nil {
				p.log.Errorf("unable to gen delivery script: "+
					"%v", err)

				return nil, fmt.Errorf("close addr unavailable")
			}
		}
	}

	// If the new RBF co-op close is negotiated, then we'll init and start
	// that state machine, skipping the steps for the negotiate machine
	// below. We don't support this close type for taproot channels though.
	if p.rbfCoopCloseAllowed() && !isTaprootChan {
		_, err := p.initRbfChanCloser(lnChan)
		if err != nil {
			return nil, fmt.Errorf("unable to init rbf chan "+
				"closer during restart: %w", err)
		}

		shutdownDesc := fn.MapOption(
			newRestartShutdownInit,
		)(shutdownInfo)

		err = p.startRbfChanCloser(
			fn.FlattenOption(shutdownDesc), lnChan.ChannelPoint(),
		)

		return nil, err
	}

	// Compute an ideal fee.
	feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
		p.cfg.CoopCloseTargetConfs,
	)
	if err != nil {
		p.log.Errorf("unable to query fee estimator: %v", err)
		return nil, fmt.Errorf("unable to estimate fee")
	}

	// Determine whether we or the peer are the initiator of the coop
	// close attempt by looking at the channel's status.
	closingParty := lntypes.Remote
	if c.HasChanStatus(channeldb.ChanStatusLocalCloseInitiator) {
		closingParty = lntypes.Local
	}

	addr, err := p.addrWithInternalKey(deliveryScript)
	if err != nil {
		return nil, fmt.Errorf("unable to parse addr: %w", err)
	}
	chanCloser, err := p.createChanCloser(
		lnChan, addr, feePerKw, nil, closingParty,
	)
	if err != nil {
		p.log.Errorf("unable to create chan closer: %v", err)
		return nil, fmt.Errorf("unable to create chan closer")
	}

	p.activeChanCloses.Store(chanID, makeNegotiateCloser(chanCloser))

	// Create the Shutdown message.
	shutdownMsg, err := chanCloser.ShutdownChan()
	if err != nil {
		p.log.Errorf("unable to create shutdown message: %v", err)
		p.activeChanCloses.Delete(chanID)
		return nil, err
	}

	return shutdownMsg, nil
}

// createChanCloser constructs a ChanCloser from the passed parameters and is
// used to de-duplicate code.
func (p *Brontide) createChanCloser(channel *lnwallet.LightningChannel,
	deliveryScript *chancloser.DeliveryAddrWithKey,
	fee chainfee.SatPerKWeight, req *htlcswitch.ChanClose,
	closer lntypes.ChannelParty) (*chancloser.ChanCloser, error) {

	_, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
	if err != nil {
		p.log.Errorf("unable to obtain best block: %v", err)
		return nil, fmt.Errorf("cannot obtain best block")
	}

	// The req will only be set if we initiated the co-op closing flow.
	var maxFee chainfee.SatPerKWeight
	if req != nil {
		maxFee = req.MaxFee
	}

	chanCloser := chancloser.NewChanCloser(
		chancloser.ChanCloseCfg{
			Channel:      channel,
			MusigSession: NewMusigChanCloser(channel),
			FeeEstimator: &chancloser.SimpleCoopFeeEstimator{},
			BroadcastTx:  p.cfg.Wallet.PublishTransaction,
			AuxCloser:    p.cfg.AuxChanCloser,
			DisableChannel: func(op wire.OutPoint) error {
				return p.cfg.ChanStatusMgr.RequestDisable(
					op, false,
				)
			},
			MaxFee: maxFee,
			Disconnect: func() error {
				return p.cfg.DisconnectPeer(p.IdentityKey())
			},
			ChainParams: &p.cfg.Wallet.Cfg.NetParams,
		},
		*deliveryScript,
		fee,
		uint32(startingHeight),
		req,
		closer,
	)

	return chanCloser, nil
}

3478
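// NOTE: The restart path above passes a nil req into createChanCloser, since
// no RPC request is driving that flow. As the comment in the function notes,
// a nil req means no user-specified max fee is enforced, and (per the
// closeReq != nil guards in finalizeChanClosure below) no close updates are
// streamed back to an RPC caller.
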
// initNegotiateChanCloser initializes the channel closer for a channel that is
// using the original "negotiation" based protocol. This path is used when
// we're the one initiating the channel close.
//
// TODO(roasbeef): can make a MsgEndpoint for existing handling logic to
// further abstract.
func (p *Brontide) initNegotiateChanCloser(req *htlcswitch.ChanClose,
        channel *lnwallet.LightningChannel) error {

        // First, we'll choose a delivery address that we'll use to send the
        // funds to in the case of a successful negotiation.

        // An upfront shutdown and user provided script are both optional, but
        // must be equal if both are set (because we cannot serve a request to
        // close out to a script which violates upfront shutdown). Get the
        // appropriate address to close out to (which may be nil if neither is
        // set) and error if they are both set and do not match.
        deliveryScript, err := chooseDeliveryScript(
                channel.LocalUpfrontShutdownScript(), req.DeliveryScript,
                p.genDeliveryScript,
        )
        if err != nil {
                return fmt.Errorf("cannot close channel %v: %w",
                        req.ChanPoint, err)
        }

        addr, err := p.addrWithInternalKey(deliveryScript)
        if err != nil {
                return fmt.Errorf("unable to parse addr for channel "+
                        "%v: %w", req.ChanPoint, err)
        }

        chanCloser, err := p.createChanCloser(
                channel, addr, req.TargetFeePerKw, req, lntypes.Local,
        )
        if err != nil {
                return fmt.Errorf("unable to make chan closer: %w", err)
        }

        chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint())
        p.activeChanCloses.Store(chanID, makeNegotiateCloser(chanCloser))

        // Finally, we'll initiate the channel shutdown within the chanCloser,
        // and send the shutdown message to the remote party to kick things
        // off.
        shutdownMsg, err := chanCloser.ShutdownChan()
        if err != nil {
                // As we were unable to shutdown the channel, we'll return it
                // back to its normal state.
                defer channel.ResetState()

                p.activeChanCloses.Delete(chanID)

                return fmt.Errorf("unable to shutdown channel: %w", err)
        }

        link := p.fetchLinkFromKeyAndCid(chanID)
        if link == nil {
                // If the link is nil then it means it was already removed from
                // the switch or it never existed in the first place. The
                // latter case is handled at the beginning of this function, so
                // in the case where it has already been removed, we can skip
                // adding the commit hook to queue a Shutdown message.
                p.log.Warnf("link not found during attempted closure: "+
                        "%v", chanID)
                return nil
        }

        if !link.DisableAdds(htlcswitch.Outgoing) {
                p.log.Warnf("Outgoing link adds already "+
                        "disabled: %v", link.ChanID())
        }

        link.OnCommitOnce(htlcswitch.Outgoing, func() {
                p.queueMsg(shutdownMsg, nil)
        })

        return nil
}

// chooseAddr returns the provided address if it is non-zero length, otherwise
// None.
func chooseAddr(addr lnwire.DeliveryAddress) fn.Option[lnwire.DeliveryAddress] {
        if len(addr) == 0 {
                return fn.None[lnwire.DeliveryAddress]()
        }

        return fn.Some(addr)
}

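// Illustrative only (not part of the original file, and the fn.Option method
// names below are assumptions about lnd's fn package): chooseAddr maps an
// empty delivery address to None, so callers can treat "no upfront shutdown
// script" uniformly via the option API, e.g.:
//
//      chooseAddr(nil).IsNone()                    // true
//      chooseAddr(script).UnwrapOr(fallbackScript) // script
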
// observeRbfCloseUpdates observes the channel for any updates that may
// indicate that a new txid has been broadcast, or that the channel has fully
// closed on chain.
func (p *Brontide) observeRbfCloseUpdates(chanCloser *chancloser.RbfChanCloser,
        closeReq *htlcswitch.ChanClose,
        coopCloseStates chancloser.RbfStateSub) {

        newStateChan := coopCloseStates.NewItemCreated.ChanOut()
        defer chanCloser.RemoveStateSub(coopCloseStates)

        var (
                lastTxids    lntypes.Dual[chainhash.Hash]
                lastFeeRates lntypes.Dual[chainfee.SatPerVByte]
        )

        maybeNotifyTxBroadcast := func(state chancloser.AsymmetricPeerState,
                party lntypes.ChannelParty) {

                // First, check to see if we have an error to report to the
                // caller. If so, then we'll return that error and exit, as the
                // stream will exit as well.
                if closeErr, ok := state.(*chancloser.CloseErr); ok {
                        // We hit an error during the last state transition, so
                        // we'll extract the error then send it to the user.
                        err := closeErr.Err()

                        peerLog.Warnf("ChannelPoint(%v): encountered close "+
                                "err: %v", closeReq.ChanPoint, err)

                        select {
                        case closeReq.Err <- err:
                        case <-closeReq.Ctx.Done():
                        case <-p.cg.Done():
                        }

                        return
                }

                closePending, ok := state.(*chancloser.ClosePending)

                // If this isn't the close pending state, we aren't at the
                // terminal state yet.
                if !ok {
                        return
                }

                // Only notify if the fee rate is greater.
                newFeeRate := closePending.FeeRate
                lastFeeRate := lastFeeRates.GetForParty(party)
                if newFeeRate <= lastFeeRate {
                        peerLog.Debugf("ChannelPoint(%v): remote party made "+
                                "update for fee rate %v, but we already have "+
                                "a higher fee rate of %v", closeReq.ChanPoint,
                                newFeeRate, lastFeeRate)

                        return
                }

                feeRate := closePending.FeeRate
                lastFeeRates.SetForParty(party, feeRate)

                // At this point, we'll have a txid that we can use to notify
                // the client, but only if it's different from the last one we
                // sent. If the user attempted to bump, but was rejected due to
                // RBF, then we won't send a redundant update.
                closingTxid := closePending.CloseTx.TxHash()
                lastTxid := lastTxids.GetForParty(party)
                if closeReq != nil && closingTxid != lastTxid {
                        select {
                        case closeReq.Updates <- &PendingUpdate{
                                Txid:        closingTxid[:],
                                FeePerVbyte: fn.Some(closePending.FeeRate),
                                IsLocalCloseTx: fn.Some(
                                        party == lntypes.Local,
                                ),
                        }:

                        case <-closeReq.Ctx.Done():
                                return

                        case <-p.cg.Done():
                                return
                        }
                }

                lastTxids.SetForParty(party, closingTxid)
        }

        peerLog.Infof("Observing RBF close updates for channel %v",
                closeReq.ChanPoint)

        // We'll consume each new incoming state to send out the appropriate
        // RPC update.
        for {
                select {
                case newState := <-newStateChan:

                        switch closeState := newState.(type) {
                        // Once we've reached the state of pending close, we
                        // have a txid that we broadcasted.
                        case *chancloser.ClosingNegotiation:
                                peerState := closeState.PeerState

                                // Each side may have gained a new co-op close
                                // tx, so we'll examine both to see if they've
                                // changed.
                                maybeNotifyTxBroadcast(
                                        peerState.GetForParty(lntypes.Local),
                                        lntypes.Local,
                                )
                                maybeNotifyTxBroadcast(
                                        peerState.GetForParty(lntypes.Remote),
                                        lntypes.Remote,
                                )

                        // Otherwise, if we transition to CloseFin, then we
                        // know that we're done.
                        case *chancloser.CloseFin:
                                // To clean up, we'll remove the chan closer
                                // from the active map, and send the final
                                // update to the client.
                                closingTxid := closeState.ConfirmedTx.TxHash()
                                if closeReq != nil {
                                        closeReq.Updates <- &ChannelCloseUpdate{
                                                ClosingTxid: closingTxid[:],
                                                Success:     true,
                                        }
                                }
                                chanID := lnwire.NewChanIDFromOutPoint(
                                        *closeReq.ChanPoint,
                                )
                                p.activeChanCloses.Delete(chanID)

                                return
                        }

                case <-closeReq.Ctx.Done():
                        return

                case <-p.cg.Done():
                        return
                }
        }
}

// chanErrorReporter is a simple implementation of the
// chancloser.ErrorReporter. This is bound to a single channel by the channel
// ID.
type chanErrorReporter struct {
        chanID lnwire.ChannelID
        peer   *Brontide
}

// newChanErrorReporter creates a new instance of the chanErrorReporter.
func newChanErrorReporter(chanID lnwire.ChannelID,
        peer *Brontide) *chanErrorReporter {

        return &chanErrorReporter{
                chanID: chanID,
                peer:   peer,
        }
}

// ReportError is a method that's used to report an error that occurred during
// state machine execution. This is used by the RBF close state machine to
// terminate the state machine and send an error to the remote peer.
//
// This is a part of the chancloser.ErrorReporter interface.
func (c *chanErrorReporter) ReportError(chanErr error) {
        c.peer.log.Errorf("coop close error for channel %v: %v",
                c.chanID, chanErr)

        var errMsg []byte
        if errors.Is(chanErr, chancloser.ErrInvalidStateTransition) {
                errMsg = []byte("unexpected protocol message")
        } else {
                errMsg = []byte(chanErr.Error())
        }

        err := c.peer.SendMessageLazy(false, &lnwire.Error{
                ChanID: c.chanID,
                Data:   errMsg,
        })
        if err != nil {
                c.peer.log.Warnf("unable to send error message to peer: %v",
                        err)
        }

        // After we send the error message to the peer, we'll re-initialize the
        // coop close state machine as they may send a shutdown message to
        // retry the coop close.
        lnChan, ok := c.peer.activeChannels.Load(c.chanID)
        if !ok {
                return
        }

        if lnChan == nil {
                c.peer.log.Debugf("channel %v is pending, not "+
                        "re-initializing coop close state machine",
                        c.chanID)

                return
        }

        if _, err := c.peer.initRbfChanCloser(lnChan); err != nil {
                c.peer.activeChanCloses.Delete(c.chanID)

                c.peer.log.Errorf("unable to init RBF chan closer after "+
                        "error case: %v", err)
        }
}

// chanFlushEventSentinel is used to send the RBF coop close state machine the
// channel flushed event. We'll wait until the state machine enters the
// ChannelFlushing state, then request the link to send the event once flushed.
//
// NOTE: This MUST be run as a goroutine.
func (p *Brontide) chanFlushEventSentinel(chanCloser *chancloser.RbfChanCloser,
        link htlcswitch.ChannelUpdateHandler,
        channel *lnwallet.LightningChannel) {

        defer p.cg.WgDone()

        // If there's no link, then the channel has already been flushed, so we
        // don't need to continue.
        if link == nil {
                return
        }

        coopCloseStates := chanCloser.RegisterStateEvents()
        defer chanCloser.RemoveStateSub(coopCloseStates)

        newStateChan := coopCloseStates.NewItemCreated.ChanOut()

        sendChanFlushed := func() {
                chanState := channel.StateSnapshot()

                peerLog.Infof("ChannelPoint(%v) has been flushed for co-op "+
                        "close, sending event to chan closer",
                        channel.ChannelPoint())

                chanBalances := chancloser.ShutdownBalances{
                        LocalBalance:  chanState.LocalBalance,
                        RemoteBalance: chanState.RemoteBalance,
                }
                ctx := context.Background()
                chanCloser.SendEvent(ctx, &chancloser.ChannelFlushed{
                        ShutdownBalances: chanBalances,
                        FreshFlush:       true,
                })
        }

        // We'll wait until the channel enters the ChannelFlushing state. We
        // exit after a successful loop, as after the first RBF iteration the
        // channel will always be flushed.
        for {
                select {
                case newState, ok := <-newStateChan:
                        if !ok {
                                return
                        }

                        if _, ok := newState.(*chancloser.ChannelFlushing); ok {
                                peerLog.Infof("ChannelPoint(%v): rbf coop "+
                                        "close is awaiting a flushed state, "+
                                        "registering with link...",
                                        channel.ChannelPoint())

                                // Request the link to send the event once the
                                // channel is flushed. We only need this event
                                // sent once, so we can exit now.
                                link.OnFlushedOnce(sendChanFlushed)

                                return
                        }

                case <-p.cg.Done():
                        return
                }
        }
}

// initRbfChanCloser initializes the channel closer for a channel that is
// using the new RBF based co-op close protocol. This only creates the chan
// closer, but doesn't attempt to trigger any manual state transitions.
func (p *Brontide) initRbfChanCloser(
        channel *lnwallet.LightningChannel) (*chancloser.RbfChanCloser, error) {

        chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint())

        link := p.fetchLinkFromKeyAndCid(chanID)

        _, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
        if err != nil {
                return nil, fmt.Errorf("cannot obtain best block: %w", err)
        }

        defaultFeePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
                p.cfg.CoopCloseTargetConfs,
        )
        if err != nil {
                return nil, fmt.Errorf("unable to estimate fee: %w", err)
        }

        thawHeight, err := channel.AbsoluteThawHeight()
        if err != nil {
                return nil, fmt.Errorf("unable to get thaw height: %w", err)
        }

        peerPub := *p.IdentityKey()

        msgMapper := chancloser.NewRbfMsgMapper(
                uint32(startingHeight), chanID, peerPub,
        )

        initialState := chancloser.ChannelActive{}

        scid := channel.ZeroConfRealScid().UnwrapOr(
                channel.ShortChanID(),
        )

        env := chancloser.Environment{
                ChainParams:    p.cfg.Wallet.Cfg.NetParams,
                ChanPeer:       peerPub,
                ChanPoint:      channel.ChannelPoint(),
                ChanID:         chanID,
                Scid:           scid,
                ChanType:       channel.ChanType(),
                DefaultFeeRate: defaultFeePerKw.FeePerVByte(),
                ThawHeight:     fn.Some(thawHeight),
                RemoteUpfrontShutdown: chooseAddr(
                        channel.RemoteUpfrontShutdownScript(),
                ),
                LocalUpfrontShutdown: chooseAddr(
                        channel.LocalUpfrontShutdownScript(),
                ),
                NewDeliveryScript: func() (lnwire.DeliveryAddress, error) {
                        return p.genDeliveryScript()
                },
                FeeEstimator: &chancloser.SimpleCoopFeeEstimator{},
                CloseSigner:  channel,
                ChanObserver: newChanObserver(
                        channel, link, p.cfg.ChanStatusMgr,
                ),
        }

        spendEvent := protofsm.RegisterSpend[chancloser.ProtocolEvent]{
                OutPoint:   channel.ChannelPoint(),
                PkScript:   channel.FundingTxOut().PkScript,
                HeightHint: channel.DeriveHeightHint(),
                PostSpendEvent: fn.Some[chancloser.RbfSpendMapper](
                        chancloser.SpendMapper,
                ),
        }

        daemonAdapters := NewLndDaemonAdapters(LndAdapterCfg{
                MsgSender:     newPeerMsgSender(peerPub, p),
                TxBroadcaster: p.cfg.Wallet,
                ChainNotifier: p.cfg.ChainNotifier,
        })

        protoCfg := chancloser.RbfChanCloserCfg{
                Daemon:        daemonAdapters,
                InitialState:  &initialState,
                Env:           &env,
                InitEvent:     fn.Some[protofsm.DaemonEvent](&spendEvent),
                ErrorReporter: newChanErrorReporter(chanID, p),
                MsgMapper: fn.Some[protofsm.MsgMapper[chancloser.ProtocolEvent]]( //nolint:ll
                        msgMapper,
                ),
        }

        ctx := context.Background()
        chanCloser := protofsm.NewStateMachine(protoCfg)
        chanCloser.Start(ctx)

        // Finally, we'll register this new endpoint with the message router so
        // future co-op close messages are handled by this state machine.
        err = fn.MapOptionZ(p.msgRouter, func(r msgmux.Router) error {
                _ = r.UnregisterEndpoint(chanCloser.Name())

                return r.RegisterEndpoint(&chanCloser)
        })
        if err != nil {
                chanCloser.Stop()

                return nil, fmt.Errorf("unable to register endpoint for co-op "+
                        "close: %w", err)
        }

        p.activeChanCloses.Store(chanID, makeRbfCloser(&chanCloser))

        // Now that we've created the rbf closer state machine, we'll launch a
        // new goroutine to eventually send in the ChannelFlushed event once
        // needed.
        p.cg.WgAdd(1)
        go p.chanFlushEventSentinel(&chanCloser, link, channel)

        return &chanCloser, nil
}

// shutdownInit describes the two ways we can initiate a new shutdown. Either we
// got an RPC request to do so (left), or we sent a shutdown message to the
// party (for whatever reason), but crashed before the close was complete.
//
//nolint:ll
type shutdownInit = fn.Option[fn.Either[*htlcswitch.ChanClose, channeldb.ShutdownInfo]]

// shutdownStartFeeRate returns the fee rate that should be used for the
// shutdown. This returns a doubly wrapped option as the shutdown info might
// be none, and the fee rate is only defined for the user initiated shutdown.
func shutdownStartFeeRate(s shutdownInit) fn.Option[chainfee.SatPerKWeight] {
        feeRateOpt := fn.MapOption(func(init fn.Either[*htlcswitch.ChanClose,
                channeldb.ShutdownInfo]) fn.Option[chainfee.SatPerKWeight] {

                var feeRate fn.Option[chainfee.SatPerKWeight]
                init.WhenLeft(func(req *htlcswitch.ChanClose) {
                        feeRate = fn.Some(req.TargetFeePerKw)
                })

                return feeRate
        })(s)

        return fn.FlattenOption(feeRateOpt)
}

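// For example: an RPC-initiated shutdown carries a target fee rate, while a
// restart-initiated one does not, since only the left (ChanClose) branch sets
// the inner option above:
//
//      shutdownStartFeeRate(newRPCShutdownInit(req))      // Some(req.TargetFeePerKw)
//      shutdownStartFeeRate(newRestartShutdownInit(info)) // None
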
// shutdownStartAddr returns the delivery address that should be used when
// restarting the shutdown process. If we didn't send a shutdown before we
// restarted, and the user didn't initiate one either, then None is returned.
func shutdownStartAddr(s shutdownInit) fn.Option[lnwire.DeliveryAddress] {
        addrOpt := fn.MapOption(func(init fn.Either[*htlcswitch.ChanClose,
                channeldb.ShutdownInfo]) fn.Option[lnwire.DeliveryAddress] {

                var addr fn.Option[lnwire.DeliveryAddress]
                init.WhenLeft(func(req *htlcswitch.ChanClose) {
                        if len(req.DeliveryScript) != 0 {
                                addr = fn.Some(req.DeliveryScript)
                        }
                })
                init.WhenRight(func(info channeldb.ShutdownInfo) {
                        addr = fn.Some(info.DeliveryScript.Val)
                })

                return addr
        })(s)

        return fn.FlattenOption(addrOpt)
}

// whenRPCShutdown registers a callback to be executed when the shutdown init
// type is an RPC request.
func whenRPCShutdown(s shutdownInit, f func(r *htlcswitch.ChanClose)) {
        s.WhenSome(func(init fn.Either[*htlcswitch.ChanClose,
                channeldb.ShutdownInfo]) {

                init.WhenLeft(f)
        })
}

// newRestartShutdownInit creates a new shutdownInit for the case where we need
// to restart the shutdown flow after a restart.
func newRestartShutdownInit(info channeldb.ShutdownInfo) shutdownInit {
        return fn.Some(fn.NewRight[*htlcswitch.ChanClose](info))
}

// newRPCShutdownInit creates a new shutdownInit for the case where we
// initiated the shutdown via an RPC client.
func newRPCShutdownInit(req *htlcswitch.ChanClose) shutdownInit {
        return fn.Some(
                fn.NewLeft[*htlcswitch.ChanClose, channeldb.ShutdownInfo](req),
        )
}

// waitUntilRbfCoastClear waits until the RBF co-op close state machine has
// advanced to a terminal state before attempting another fee bump.
func waitUntilRbfCoastClear(ctx context.Context,
        rbfCloser *chancloser.RbfChanCloser) error {

        coopCloseStates := rbfCloser.RegisterStateEvents()
        newStateChan := coopCloseStates.NewItemCreated.ChanOut()
        defer rbfCloser.RemoveStateSub(coopCloseStates)

        isTerminalState := func(newState chancloser.RbfState) bool {
                // If we're not in the negotiation sub-state, then we aren't at
                // the terminal state yet.
                state, ok := newState.(*chancloser.ClosingNegotiation)
                if !ok {
                        return false
                }

                localState := state.PeerState.GetForParty(lntypes.Local)

                // If this isn't the close pending state, we aren't at the
                // terminal state yet.
                _, ok = localState.(*chancloser.ClosePending)

                return ok
        }

        // Before we enter the subscription loop below, check to see if we're
        // already in the terminal state.
        rbfState, err := rbfCloser.CurrentState()
        if err != nil {
                return err
        }
        if isTerminalState(rbfState) {
                return nil
        }

        peerLog.Debugf("Waiting for RBF iteration to complete...")

        for {
                select {
                case newState := <-newStateChan:
                        if isTerminalState(newState) {
                                return nil
                        }

                case <-ctx.Done():
                        return fmt.Errorf("context canceled")
                }
        }
}

// startRbfChanCloser kicks off the co-op close process using the new RBF based
// co-op close protocol. This is called when we're the one that's initiating
// the cooperative channel close.
//
// TODO(roasbeef): just accept the two shutdown pointer params instead??
func (p *Brontide) startRbfChanCloser(shutdown shutdownInit,
        chanPoint wire.OutPoint) error {

        // Unlike the old negotiate chan closer, we'll always create the RBF
        // chan closer on startup, so we can skip init here.
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
        chanCloser, found := p.activeChanCloses.Load(chanID)
        if !found {
                return fmt.Errorf("rbf chan closer not found for channel %v",
                        chanPoint)
        }

        defaultFeePerKw, err := shutdownStartFeeRate(
                shutdown,
        ).UnwrapOrFuncErr(func() (chainfee.SatPerKWeight, error) {
                return p.cfg.FeeEstimator.EstimateFeePerKW(
                        p.cfg.CoopCloseTargetConfs,
                )
        })
        if err != nil {
                return fmt.Errorf("unable to estimate fee: %w", err)
        }

        chanCloser.WhenRight(func(rbfCloser *chancloser.RbfChanCloser) {
                peerLog.Infof("ChannelPoint(%v): rbf-coop close requested, "+
                        "sending shutdown", chanPoint)

                rbfState, err := rbfCloser.CurrentState()
                if err != nil {
                        peerLog.Warnf("ChannelPoint(%v): unable to get "+
                                "current state for rbf-coop close: %v",
                                chanPoint, err)

                        return
                }

                coopCloseStates := rbfCloser.RegisterStateEvents()

                // Before we send our event below, we'll launch a goroutine to
                // watch for the final terminal state to send updates to the RPC
                // client. We only need to do this if there's an RPC caller.
                var rpcShutdown bool
                whenRPCShutdown(shutdown, func(req *htlcswitch.ChanClose) {
                        rpcShutdown = true

                        p.cg.WgAdd(1)
                        go func() {
                                defer p.cg.WgDone()

                                p.observeRbfCloseUpdates(
                                        rbfCloser, req, coopCloseStates,
                                )
                        }()
                })

                if !rpcShutdown {
                        defer rbfCloser.RemoveStateSub(coopCloseStates)
                }

                ctx, _ := p.cg.Create(context.Background())
                feeRate := defaultFeePerKw.FeePerVByte()

                // Depending on the state of the state machine, we'll either
                // kick things off by sending shutdown, or attempt to send a new
                // offer to the remote party.
                switch rbfState.(type) {
                // The channel is still active, so we'll now kick off the co-op
                // close process by instructing it to send a shutdown message to
                // the remote party.
                case *chancloser.ChannelActive:
                        rbfCloser.SendEvent(
                                context.Background(),
                                &chancloser.SendShutdown{
                                        IdealFeeRate: feeRate,
                                        DeliveryAddr: shutdownStartAddr(
                                                shutdown,
                                        ),
                                },
                        )

                // If we haven't yet sent an offer (didn't have enough funds at
                // the prior fee rate), or we've sent an offer, then we'll
                // trigger a new offer event.
                case *chancloser.ClosingNegotiation:
                        // Before we send the event below, we'll wait until
                        // we're in a semi-terminal state.
                        err := waitUntilRbfCoastClear(ctx, rbfCloser)
                        if err != nil {
                                peerLog.Warnf("ChannelPoint(%v): unable to "+
                                        "wait for coast to clear: %v",
                                        chanPoint, err)

                                return
                        }

                        event := chancloser.ProtocolEvent(
                                &chancloser.SendOfferEvent{
                                        TargetFeeRate: feeRate,
                                },
                        )
                        rbfCloser.SendEvent(ctx, event)

                default:
                        peerLog.Warnf("ChannelPoint(%v): unexpected state "+
                                "for rbf-coop close: %T", chanPoint, rbfState)
                }
        })

        return nil
}

// handleLocalCloseReq kicks off the workflow to execute a cooperative or
// forced unilateral closure of the channel initiated by a local subsystem.
func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
        chanID := lnwire.NewChanIDFromOutPoint(*req.ChanPoint)

        channel, ok := p.activeChannels.Load(chanID)

        // Though this function can't be called for pending channels, we still
        // check whether channel is nil for safety.
        if !ok || channel == nil {
                err := fmt.Errorf("unable to close channel, ChannelID(%v) is "+
                        "unknown", chanID)
                p.log.Errorf(err.Error())
                req.Err <- err
                return
        }

        isTaprootChan := channel.ChanType().IsTaproot()

        switch req.CloseType {
        // A type of CloseRegular indicates that the user has opted to close
        // out this channel on-chain, so we execute the cooperative channel
        // closure workflow.
        case contractcourt.CloseRegular:
                var err error
                switch {
                // If this is the RBF coop state machine, then we'll instruct
                // it to send the shutdown message. This also might be an RBF
                // iteration, in which case we'll be obtaining a new
                // transaction w/ a higher fee rate.
                //
                // We don't support this close type for taproot channels yet
                // however.
                case !isTaprootChan && p.rbfCoopCloseAllowed():
                        err = p.startRbfChanCloser(
                                newRPCShutdownInit(req), channel.ChannelPoint(),
                        )
                default:
                        err = p.initNegotiateChanCloser(req, channel)
                }

                if err != nil {
                        p.log.Errorf(err.Error())
                        req.Err <- err
                }

        // A type of CloseBreach indicates that the counterparty has breached
        // the channel, therefore we need to clean up our local state.
        case contractcourt.CloseBreach:
                // TODO(roasbeef): no longer need with newer breach logic?
                p.log.Infof("ChannelPoint(%v) has been breached, wiping "+
                        "channel", req.ChanPoint)
                p.WipeChannel(req.ChanPoint)
        }
}

// linkFailureReport is sent to the channelManager whenever a link reports a
// link failure, and is forced to exit. The report houses the necessary
// information to clean up the channel state, send back the error message, and
// force close if necessary.
type linkFailureReport struct {
        chanPoint   wire.OutPoint
        chanID      lnwire.ChannelID
        shortChanID lnwire.ShortChannelID
        linkErr     htlcswitch.LinkFailureError
}

// handleLinkFailure processes a link failure report when a link in the switch
// fails. It facilitates the removal of all channel state within the peer,
// force closing the channel depending on severity, and sending the error
// message back to the remote party.
func (p *Brontide) handleLinkFailure(failure linkFailureReport) {
        // Retrieve the channel from the map of active channels. We do this to
        // have access to it even after WipeChannel removes it from the map.
        chanID := lnwire.NewChanIDFromOutPoint(failure.chanPoint)
        lnChan, _ := p.activeChannels.Load(chanID)

        // We begin by wiping the link, which will remove it from the switch,
        // such that it won't be used for any more updates.
        //
        // TODO(halseth): should introduce a way to atomically stop/pause the
        // link and cancel back any adds in its mailboxes such that we can
        // safely force close without the link being added again and updates
        // being applied.
        p.WipeChannel(&failure.chanPoint)

        // If the error encountered was severe enough, we'll now force close
        // the channel to prevent re-adding it to the switch in the future.
        if failure.linkErr.FailureAction == htlcswitch.LinkFailureForceClose {
                p.log.Warnf("Force closing link(%v)", failure.shortChanID)

                closeTx, err := p.cfg.ChainArb.ForceCloseContract(
                        failure.chanPoint,
                )
                if err != nil {
                        p.log.Errorf("unable to force close "+
                                "link(%v): %v", failure.shortChanID, err)
                } else {
                        p.log.Infof("channel(%v) force "+
                                "closed with txid %v",
                                failure.shortChanID, closeTx.TxHash())
                }
        }

        // If this is a permanent failure, we will mark the channel borked.
        if failure.linkErr.PermanentFailure && lnChan != nil {
                p.log.Warnf("Marking link(%v) borked due to permanent "+
                        "failure", failure.shortChanID)

                if err := lnChan.State().MarkBorked(); err != nil {
                        p.log.Errorf("Unable to mark channel %v borked: %v",
                                failure.shortChanID, err)
                }
        }

        // Send an error to the peer explaining why we failed the channel.
        if failure.linkErr.ShouldSendToPeer() {
                // If SendData is set, send it to the peer. If not, we'll use
                // the standard error messages in the payload. We only include
                // sendData in the cases where the error data does not contain
                // sensitive information.
                data := []byte(failure.linkErr.Error())
                if failure.linkErr.SendData != nil {
                        data = failure.linkErr.SendData
                }

                var networkMsg lnwire.Message
                if failure.linkErr.Warning {
                        networkMsg = &lnwire.Warning{
                                ChanID: failure.chanID,
                                Data:   data,
                        }
                } else {
                        networkMsg = &lnwire.Error{
                                ChanID: failure.chanID,
                                Data:   data,
                        }
                }

                err := p.SendMessage(true, networkMsg)
                if err != nil {
                        p.log.Errorf("unable to send msg to "+
                                "remote peer: %v", err)
                }
        }

        // If the failure action is disconnect, then we'll execute that now. If
        // we had to send an error above, it was a sync call, so we expect the
        // message to be flushed on the wire by now.
        if failure.linkErr.FailureAction == htlcswitch.LinkFailureDisconnect {
                p.Disconnect(fmt.Errorf("link requested disconnect"))
        }
}

4364
// public key and the channel id.
4365
func (p *Brontide) fetchLinkFromKeyAndCid(
4366
        cid lnwire.ChannelID) htlcswitch.ChannelUpdateHandler {
22✔
4367

22✔
4368
        var chanLink htlcswitch.ChannelUpdateHandler
22✔
4369

22✔
4370
        // We don't need to check the error here, and can instead just loop
22✔
4371
        // over the slice and return nil.
22✔
4372
        links, _ := p.cfg.Switch.GetLinksByInterface(p.cfg.PubKeyBytes)
22✔
4373
        for _, link := range links {
43✔
4374
                if link.ChanID() == cid {
42✔
4375
                        chanLink = link
21✔
4376
                        break
21✔
4377
                }
4378
        }
4379

4380
        return chanLink
22✔
4381
}
4382

4383
// finalizeChanClosure performs the final clean up steps once the cooperative
// closure transaction has been fully broadcast. The finalized closing state
// machine should be passed in. Once the transaction has been sufficiently
// confirmed, the channel will be marked as fully closed within the database,
// and any clients will be notified of updates to the closing state.
func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
        closeReq := chanCloser.CloseRequest()

        // First, we'll clear all indexes related to the channel in question.
        chanPoint := chanCloser.Channel().ChannelPoint()
        p.WipeChannel(&chanPoint)

        // Also clear the activeChanCloses map of this channel.
        cid := lnwire.NewChanIDFromOutPoint(chanPoint)
        p.activeChanCloses.Delete(cid) // TODO(roasbeef): existing race

        // Next, we'll launch a goroutine which will request to be notified by
        // the ChainNotifier once the closure transaction obtains a single
        // confirmation.
        notifier := p.cfg.ChainNotifier

        // If any error happens during waitForChanToClose, forward it to
        // closeReq. If this channel closure is not locally initiated, closeReq
        // will be nil, so just ignore the error.
        errChan := make(chan error, 1)
        if closeReq != nil {
                errChan = closeReq.Err
        }

        closingTx, err := chanCloser.ClosingTx()
        if err != nil {
                if closeReq != nil {
                        p.log.Error(err)
                        closeReq.Err <- err
                }
        }

        closingTxid := closingTx.TxHash()

        // If this is a locally requested shutdown, update the caller with a
        // new event detailing the current pending state of this request.
        if closeReq != nil {
                closeReq.Updates <- &PendingUpdate{
                        Txid: closingTxid[:],
                }
        }

        localOut := chanCloser.LocalCloseOutput()
        remoteOut := chanCloser.RemoteCloseOutput()
        auxOut := chanCloser.AuxOutputs()
        go WaitForChanToClose(
                chanCloser.NegotiationHeight(), notifier, errChan,
                &chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() {
                        // Respond to the local subsystem which requested the
                        // channel closure.
                        if closeReq != nil {
                                closeReq.Updates <- &ChannelCloseUpdate{
                                        ClosingTxid:       closingTxid[:],
                                        Success:           true,
                                        LocalCloseOutput:  localOut,
                                        RemoteCloseOutput: remoteOut,
                                        AuxOutputs:        auxOut,
                                }
                        }
                },
        )
}

// WaitForChanToClose uses the passed notifier to wait until the channel has
// been detected as closed on chain and then concludes by executing the
// following actions: the channel point will be sent over the settleChan, and
// finally the callback will be executed. If any error is encountered within
// the function, then it will be sent over the errChan.
func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
        errChan chan error, chanPoint *wire.OutPoint,
        closingTxID *chainhash.Hash, closeScript []byte, cb func()) {

        peerLog.Infof("Waiting for confirmation of close of ChannelPoint(%v) "+
                "with txid: %v", chanPoint, closingTxID)

        // TODO(roasbeef): add param for num needed confs
        confNtfn, err := notifier.RegisterConfirmationsNtfn(
                closingTxID, closeScript, 1, bestHeight,
        )
        if err != nil {
                if errChan != nil {
                        errChan <- err
                }
                return
        }

        // In the case that the ChainNotifier is shutting down, all subscriber
        // notification channels will be closed, generating a nil receive.
        height, ok := <-confNtfn.Confirmed
        if !ok {
                return
        }

        // The channel has been closed, remove it from any active indexes, and
        // the database state.
        peerLog.Infof("ChannelPoint(%v) is now closed at "+
                "height %v", chanPoint, height.BlockHeight)

        // Finally, execute the closure call back to mark the confirmation of
        // the transaction closing the contract.
        cb()
}

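// NOTE: WaitForChanToClose blocks until the confirmation (or a notifier
// shutdown) arrives, which is why callers such as finalizeChanClosure above
// invoke it in its own goroutine.
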
// WipeChannel removes the passed channel point from all indexes associated with
// the peer and the switch.
func (p *Brontide) WipeChannel(chanPoint *wire.OutPoint) {
        chanID := lnwire.NewChanIDFromOutPoint(*chanPoint)

        p.activeChannels.Delete(chanID)

        // Instruct the HtlcSwitch to close this link as the channel is no
        // longer active.
        p.cfg.Switch.RemoveLink(chanID)
}

// handleInitMsg handles the incoming init message which contains global and
// local feature vectors. If feature vectors are incompatible then disconnect.
func (p *Brontide) handleInitMsg(msg *lnwire.Init) error {
        // First, merge any features from the legacy global features field into
        // those presented in the local features fields.
        err := msg.Features.Merge(msg.GlobalFeatures)
        if err != nil {
                return fmt.Errorf("unable to merge legacy global features: %w",
                        err)
        }

        // Then, finalize the remote feature vector providing the flattened
        // feature bit namespace.
        p.remoteFeatures = lnwire.NewFeatureVector(
                msg.Features, lnwire.Features,
        )

        // Now that we have their features loaded, we'll ensure that they
        // didn't set any required bits that we don't know of.
        err = feature.ValidateRequired(p.remoteFeatures)
        if err != nil {
                return fmt.Errorf("invalid remote features: %w", err)
        }

        // Ensure the remote party's feature vector contains all transitive
        // dependencies. We know ours are correct since they are validated
        // during the feature manager's instantiation.
        err = feature.ValidateDeps(p.remoteFeatures)
        if err != nil {
                return fmt.Errorf("invalid remote features: %w", err)
        }

        // Now that we know we understand their requirements, we'll check to
        // see if they don't support anything that we deem to be mandatory.
        if !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired) {
                return fmt.Errorf("data loss protection required")
        }

        return nil
}

// LocalFeatures returns the set of global features that has been advertised by
// the local node. This allows sub-systems that use this interface to gate their
// behavior off the set of negotiated feature bits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) LocalFeatures() *lnwire.FeatureVector {
        return p.cfg.Features
}

// RemoteFeatures returns the set of global features that has been advertised by
// the remote node. This allows sub-systems that use this interface to gate
// their behavior off the set of negotiated feature bits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) RemoteFeatures() *lnwire.FeatureVector {
        return p.remoteFeatures
}

// hasNegotiatedScidAlias returns true if we've negotiated the
// option-scid-alias feature bit with the peer.
func (p *Brontide) hasNegotiatedScidAlias() bool {
        peerHas := p.remoteFeatures.HasFeature(lnwire.ScidAliasOptional)
        localHas := p.cfg.Features.HasFeature(lnwire.ScidAliasOptional)
        return peerHas && localHas
}

// sendInitMsg sends the Init message to the remote peer. This message contains
// our currently supported local and global features.
func (p *Brontide) sendInitMsg(legacyChan bool) error {
        features := p.cfg.Features.Clone()
        legacyFeatures := p.cfg.LegacyFeatures.Clone()

        // If we have a legacy channel open with a peer, we downgrade static
        // remote required to optional in case the peer does not understand the
        // required feature bit. If we do not do this, the peer will reject our
        // connection because it does not understand a required feature bit, and
        // our channel will be unusable.
        if legacyChan && features.RequiresFeature(lnwire.StaticRemoteKeyRequired) {
                p.log.Infof("Legacy channel open with peer, " +
                        "downgrading static remote required feature bit to " +
                        "optional")

                // Unset and set in both the local and global features to
                // ensure both sets are consistent and mergeable by old and
                // new nodes.
                features.Unset(lnwire.StaticRemoteKeyRequired)
                legacyFeatures.Unset(lnwire.StaticRemoteKeyRequired)

                features.Set(lnwire.StaticRemoteKeyOptional)
                legacyFeatures.Set(lnwire.StaticRemoteKeyOptional)
        }

        msg := lnwire.NewInitMessage(
                legacyFeatures.RawFeatureVector,
                features.RawFeatureVector,
        )

        return p.writeMessage(msg)
}

// resendChanSyncMsg will attempt to find a channel sync message for the closed
// channel and resend it to our peer.
func (p *Brontide) resendChanSyncMsg(cid lnwire.ChannelID) error {
        // If we already re-sent the message for this channel, we won't do it
        // again.
        if _, ok := p.resentChanSyncMsg[cid]; ok {
                return nil
        }

        // Check if we have any channel sync messages stored for this channel.
        c, err := p.cfg.ChannelDB.FetchClosedChannelForID(cid)
        if err != nil {
                return fmt.Errorf("unable to fetch channel sync messages for "+
                        "peer %v: %v", p, err)
        }

        if c.LastChanSyncMsg == nil {
                return fmt.Errorf("no chan sync message stored for channel %v",
                        cid)
        }

        if !c.RemotePub.IsEqual(p.IdentityKey()) {
                return fmt.Errorf("ignoring channel reestablish from "+
                        "peer=%x", p.IdentityKey().SerializeCompressed())
        }

        p.log.Debugf("Re-sending channel sync message for channel %v to "+
                "peer", cid)

        if err := p.SendMessage(true, c.LastChanSyncMsg); err != nil {
                return fmt.Errorf("failed resending channel sync "+
                        "message to peer %v: %v", p, err)
        }

        p.log.Debugf("Re-sent channel sync message for channel %v to peer",
                cid)

        // Note down that we sent the message, so we won't resend it again for
        // this connection.
        p.resentChanSyncMsg[cid] = struct{}{}

        return nil
}

// SendMessage sends a variadic number of high-priority messages to the remote
4649
// peer. The first argument denotes if the method should block until the
4650
// messages have been sent to the remote peer or an error is returned,
4651
// otherwise it returns immediately after queuing.
4652
//
4653
// NOTE: Part of the lnpeer.Peer interface.
4654
func (p *Brontide) SendMessage(sync bool, msgs ...lnwire.Message) error {
6✔
4655
        return p.sendMessage(sync, true, msgs...)
6✔
4656
}
6✔
4657

4658
// SendMessageLazy sends a variadic number of low-priority messages to the
4659
// remote peer. The first argument denotes if the method should block until
4660
// the messages have been sent to the remote peer or an error is returned,
4661
// otherwise it returns immediately after queueing.
4662
//
4663
// NOTE: Part of the lnpeer.Peer interface.
4664
func (p *Brontide) SendMessageLazy(sync bool, msgs ...lnwire.Message) error {
4✔
4665
        return p.sendMessage(sync, false, msgs...)
4✔
4666
}
4✔
4667

4668
// sendMessage queues a variadic number of messages using the passed priority
4669
// to the remote peer. If sync is true, this method will block until the
4670
// messages have been sent to the remote peer or an error is returned, otherwise
4671
// it returns immediately after queueing.
4672
func (p *Brontide) sendMessage(sync, priority bool, msgs ...lnwire.Message) error {
7✔
4673
        // Add all incoming messages to the outgoing queue. A list of error
7✔
4674
        // chans is populated for each message if the caller requested a sync
7✔
4675
        // send.
7✔
4676
        var errChans []chan error
7✔
4677
        if sync {
11✔
4678
                errChans = make([]chan error, 0, len(msgs))
4✔
4679
        }
4✔
4680
        for _, msg := range msgs {
14✔
4681
                // If a sync send was requested, create an error chan to listen
7✔
4682
                // for an ack from the writeHandler.
7✔
4683
                var errChan chan error
7✔
4684
                if sync {
11✔
4685
                        errChan = make(chan error, 1)
4✔
4686
                        errChans = append(errChans, errChan)
4✔
4687
                }
4✔
4688

4689
                if priority {
13✔
4690
                        p.queueMsg(msg, errChan)
6✔
4691
                } else {
10✔
4692
                        p.queueMsgLazy(msg, errChan)
4✔
4693
                }
4✔
4694
        }
4695

4696
        // Wait for all replies from the writeHandler. For async sends, this
4697
        // will be a NOP as the list of error chans is nil.
4698
        for _, errChan := range errChans {
11✔
4699
                select {
4✔
4700
                case err := <-errChan:
4✔
4701
                        return err
4✔
4702
                case <-p.cg.Done():
×
4703
                        return lnpeer.ErrPeerExiting
×
4704
                case <-p.cfg.Quit:
×
4705
                        return lnpeer.ErrPeerExiting
×
4706
                }
4707
        }
4708

4709
        return nil
6✔
4710
}
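
// A minimal usage sketch contrasting the two send paths (illustrative
// only; the peer p and message msg are assumed to exist in the caller's
// scope):
//
//      // High priority, blocking: sync=true creates a buffered error chan
//      // per message and waits for the writeHandler's ack on it.
//      if err := p.SendMessage(true, msg); err != nil {
//              // Handle the write failure or peer shutdown.
//      }
//
//      // Low priority, fire-and-forget: sync=false creates no error chans,
//      // so the call returns as soon as the message is queued.
//      _ = p.SendMessageLazy(false, msg)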

// PubKey returns the pubkey of the peer in compressed serialized format.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) PubKey() [33]byte {
        return p.cfg.PubKeyBytes
}

// IdentityKey returns the public key of the remote peer.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) IdentityKey() *btcec.PublicKey {
        return p.cfg.Addr.IdentityKey
}

// Address returns the network address of the remote peer.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) Address() net.Addr {
        return p.cfg.Addr.Address
}

// AddNewChannel adds a new channel to the peer. The channel should fail to be
// added if the cancel channel is closed.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) AddNewChannel(newChan *lnpeer.NewChannel,
        cancel <-chan struct{}) error {

        errChan := make(chan error, 1)
        newChanMsg := &newChannelMsg{
                channel: newChan,
                err:     errChan,
        }

        select {
        case p.newActiveChannel <- newChanMsg:
        case <-cancel:
                return errors.New("canceled adding new channel")
        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }

        // We pause here to wait for the peer to recognize the new channel
        // before we close the channel barrier corresponding to the channel.
        select {
        case err := <-errChan:
                return err
        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }
}

// AddPendingChannel adds a pending open channel to the peer. The channel
// should fail to be added if the cancel channel is closed.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) AddPendingChannel(cid lnwire.ChannelID,
        cancel <-chan struct{}) error {

        errChan := make(chan error, 1)
        newChanMsg := &newChannelMsg{
                channelID: cid,
                err:       errChan,
        }

        select {
        case p.newPendingChannel <- newChanMsg:

        case <-cancel:
                return errors.New("canceled adding pending channel")

        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }

        // We pause here to wait for the peer to recognize the new pending
        // channel before we close the channel barrier corresponding to the
        // channel.
        select {
        case err := <-errChan:
                return err

        case <-cancel:
                return errors.New("canceled adding pending channel")

        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }
}

// RemovePendingChannel removes a pending open channel from the peer.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) RemovePendingChannel(cid lnwire.ChannelID) error {
        errChan := make(chan error, 1)
        newChanMsg := &newChannelMsg{
                channelID: cid,
                err:       errChan,
        }

        select {
        case p.removePendingChannel <- newChanMsg:
        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }

        // We pause here to wait for the peer to respond to the cancellation of
        // the pending channel before we close the channel barrier
        // corresponding to the channel.
        select {
        case err := <-errChan:
                return err

        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }
}
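
// The three methods above share one request/ack pattern: a buffered error
// chan travels with the request, and the caller selects on it against the
// peer's quit signal so it can never block on a dead peer. A generic
// sketch of that pattern (hypothetical names, not part of this file):
//
//      type request struct {
//              err chan error // buffered with capacity 1 by the sender
//      }
//
//      func submit(reqs chan<- *request, quit <-chan struct{}) error {
//              req := &request{err: make(chan error, 1)}
//              select {
//              case reqs <- req:
//              case <-quit:
//                      return lnpeer.ErrPeerExiting
//              }
//
//              // Wait for the handler goroutine to ack the request.
//              select {
//              case err := <-req.err:
//                      return err
//              case <-quit:
//                      return lnpeer.ErrPeerExiting
//              }
//      }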

// StartTime returns the time at which the connection was established if the
// peer started successfully, and zero otherwise.
func (p *Brontide) StartTime() time.Time {
        return p.startTime
}

// handleCloseMsg is called when a new cooperative channel closure related
// message is received from the remote peer. We'll use this message to advance
// the chan closer state machine.
func (p *Brontide) handleCloseMsg(msg *closeMsg) {
        link := p.fetchLinkFromKeyAndCid(msg.cid)

        // We'll now fetch the matching closing state machine in order to
        // continue, or finalize the channel closure process.
        chanCloserE, err := p.fetchActiveChanCloser(msg.cid)
        if err != nil {
                // If the channel is not known to us, we'll simply ignore this
                // message.
                if err == ErrChannelNotFound {
                        return
                }

                p.log.Errorf("Unable to respond to remote close msg: %v", err)

                errMsg := &lnwire.Error{
                        ChanID: msg.cid,
                        Data:   lnwire.ErrorData(err.Error()),
                }
                p.queueMsg(errMsg, nil)
                return
        }

        if chanCloserE.IsRight() {
                // TODO(roasbeef): assert?
                return
        }

        // At this point, we'll only enter this call path if a negotiate chan
        // closer was used. So we'll extract that from the Either now.
        //
        // TODO(roasbeef): need extra helper func for either to make cleaner
        var chanCloser *chancloser.ChanCloser
        chanCloserE.WhenLeft(func(c *chancloser.ChanCloser) {
                chanCloser = c
        })

        handleErr := func(err error) {
                err = fmt.Errorf("unable to process close msg: %w", err)
                p.log.Error(err)

                // As the negotiations failed, we'll reset the channel state
                // machine to ensure we act on on-chain events as normal.
                chanCloser.Channel().ResetState()
                if chanCloser.CloseRequest() != nil {
                        chanCloser.CloseRequest().Err <- err
                }

                p.activeChanCloses.Delete(msg.cid)

                p.Disconnect(err)
        }

        // Next, we'll process the next message using the target state machine.
        // We'll either continue negotiation, or halt.
        switch typed := msg.msg.(type) {
        case *lnwire.Shutdown:
                // Disable incoming adds immediately.
                if link != nil && !link.DisableAdds(htlcswitch.Incoming) {
                        p.log.Warnf("Incoming link adds already disabled: %v",
                                link.ChanID())
                }

                oShutdown, err := chanCloser.ReceiveShutdown(*typed)
                if err != nil {
                        handleErr(err)
                        return
                }

                oShutdown.WhenSome(func(msg lnwire.Shutdown) {
                        // If the link is nil it means we can immediately queue
                        // the Shutdown message since we don't have to wait for
                        // commitment transaction synchronization.
                        if link == nil {
                                p.queueMsg(&msg, nil)
                                return
                        }

                        // Immediately disallow any new HTLCs from being added
                        // in the outgoing direction.
                        if !link.DisableAdds(htlcswitch.Outgoing) {
                                p.log.Warnf("Outgoing link adds already "+
                                        "disabled: %v", link.ChanID())
                        }

                        // When we have a Shutdown to send, we defer it till the
                        // next time we send a CommitSig to remain spec
                        // compliant.
                        link.OnCommitOnce(htlcswitch.Outgoing, func() {
                                p.queueMsg(&msg, nil)
                        })
                })

                beginNegotiation := func() {
                        oClosingSigned, err := chanCloser.BeginNegotiation()
                        if err != nil {
                                handleErr(err)
                                return
                        }

                        oClosingSigned.WhenSome(func(msg lnwire.ClosingSigned) {
                                p.queueMsg(&msg, nil)
                        })
                }

                if link == nil {
                        beginNegotiation()
                } else {
                        // Now we register a flush hook to advance the
                        // ChanCloser and possibly send out a ClosingSigned
                        // when the link finishes draining.
                        link.OnFlushedOnce(func() {
                                // Remove link in goroutine to prevent deadlock.
                                go p.cfg.Switch.RemoveLink(msg.cid)
                                beginNegotiation()
                        })
                }

        case *lnwire.ClosingSigned:
                oClosingSigned, err := chanCloser.ReceiveClosingSigned(*typed)
                if err != nil {
                        handleErr(err)
                        return
                }

                oClosingSigned.WhenSome(func(msg lnwire.ClosingSigned) {
                        p.queueMsg(&msg, nil)
                })

        default:
                panic("impossible closeMsg type")
        }

        // If we haven't finished close negotiations, then we'll continue as we
        // can't yet finalize the closure.
        if _, err := chanCloser.ClosingTx(); err != nil {
                return
        }

        // Otherwise, we've agreed on a closing fee! In this case, we'll wrap up
        // the channel closure by notifying relevant sub-systems and launching a
        // goroutine to wait for close tx conf.
        p.finalizeChanClosure(chanCloser)
}
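
// The TODO above asks for a helper to extract the left value of an Either
// more cleanly. A hedged sketch of what such a helper could look like,
// relying only on the WhenLeft method used above (leftOf does not exist in
// this file):
//
//      func leftOf[L, R any](e fn.Either[L, R]) (L, bool) {
//              var (
//                      left L
//                      ok   bool
//              )
//              e.WhenLeft(func(v L) {
//                      left, ok = v, true
//              })
//
//              return left, ok
//      }
//
// With it, the WhenLeft dance in handleCloseMsg would collapse to:
//
//      chanCloser, ok := leftOf(chanCloserE)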

// HandleLocalCloseChanReqs accepts a *htlcswitch.ChanClose and passes it onto
// the channelManager goroutine, which will shut down the link and possibly
// close the channel.
func (p *Brontide) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) {
        select {
        case p.localCloseChanReqs <- req:
                p.log.Info("Local close channel request is going to be " +
                        "delivered to the peer")
        case <-p.cg.Done():
                p.log.Info("Unable to deliver local close channel request " +
                        "to peer")
        }
}

// NetAddress returns the network address of the remote peer as an
// lnwire.NetAddress.
func (p *Brontide) NetAddress() *lnwire.NetAddress {
        return p.cfg.Addr
}

// Inbound is a getter for the Brontide's Inbound boolean in cfg.
func (p *Brontide) Inbound() bool {
        return p.cfg.Inbound
}

// ConnReq is a getter for the Brontide's connReq in cfg.
func (p *Brontide) ConnReq() *connmgr.ConnReq {
        return p.cfg.ConnReq
}

// ErrorBuffer is a getter for the Brontide's errorBuffer in cfg.
func (p *Brontide) ErrorBuffer() *queue.CircularBuffer {
        return p.cfg.ErrorBuffer
}

// SetAddress sets the remote peer's address given an address.
func (p *Brontide) SetAddress(address net.Addr) {
        p.cfg.Addr.Address = address
}

// ActiveSignal returns the peer's active signal.
func (p *Brontide) ActiveSignal() chan struct{} {
        return p.activeSignal
}

// Conn returns the peer's underlying network connection.
func (p *Brontide) Conn() net.Conn {
        return p.cfg.Conn
}

// BytesReceived returns the number of bytes received from the peer.
func (p *Brontide) BytesReceived() uint64 {
        return atomic.LoadUint64(&p.bytesReceived)
}

// BytesSent returns the number of bytes sent to the peer.
func (p *Brontide) BytesSent() uint64 {
        return atomic.LoadUint64(&p.bytesSent)
}

// LastRemotePingPayload returns the last payload the remote party sent as part
// of their ping.
func (p *Brontide) LastRemotePingPayload() []byte {
        pingPayload := p.lastPingPayload.Load()
        if pingPayload == nil {
                return []byte{}
        }

        pingBytes, ok := pingPayload.(lnwire.PingPayload)
        if !ok {
                return nil
        }

        return pingBytes
}

// attachChannelEventSubscription creates a channel event subscription and
// attaches the client to Brontide if the reenableTimeout is no greater than 1
// minute.
func (p *Brontide) attachChannelEventSubscription() error {
        // If the timeout is greater than 1 minute, it's unlikely that the link
        // hasn't yet finished its reestablishment. Return nil without
        // creating the client to specify that we don't want to retry.
        if p.cfg.ChanActiveTimeout > 1*time.Minute {
                return nil
        }

        // When the reenable timeout is less than 1 minute, it's likely the
        // channel link hasn't finished its reestablishment yet. In that case,
        // we'll give it a second chance by subscribing to the channel update
        // events. Upon receiving the `ActiveLinkEvent`, we'll then request
        // enabling the channel again.
        sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents()
        if err != nil {
                return fmt.Errorf("SubscribeChannelEvents failed: %w", err)
        }

        p.channelEventClient = sub

        return nil
}

// updateNextRevocation updates the existing channel's next revocation if it's
// nil.
func (p *Brontide) updateNextRevocation(c *channeldb.OpenChannel) error {
        chanPoint := c.FundingOutpoint
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

        // Read the current channel.
        currentChan, loaded := p.activeChannels.Load(chanID)

        // currentChan should exist, but we perform a check anyway to avoid nil
        // pointer dereference.
        if !loaded {
                return fmt.Errorf("missing active channel with chanID=%v",
                        chanID)
        }

        // currentChan should not be nil, but we perform a check anyway to
        // avoid nil pointer dereference.
        if currentChan == nil {
                return fmt.Errorf("found nil active channel with chanID=%v",
                        chanID)
        }

        // If we're being sent a new channel, and our existing channel doesn't
        // have the next revocation, then we need to update the current
        // existing channel.
        if currentChan.RemoteNextRevocation() != nil {
                return nil
        }

        p.log.Infof("Processing retransmitted ChannelReady for "+
                "ChannelPoint(%v)", chanPoint)

        nextRevoke := c.RemoteNextRevocation

        err := currentChan.InitNextRevocation(nextRevoke)
        if err != nil {
                return fmt.Errorf("unable to init next revocation: %w", err)
        }

        return nil
}

// addActiveChannel adds a new active channel to the `activeChannels` map. It
// takes a `channeldb.OpenChannel`, creates a `lnwallet.LightningChannel` from
// it and assembles it with a channel link.
func (p *Brontide) addActiveChannel(c *lnpeer.NewChannel) error {
        chanPoint := c.FundingOutpoint
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

        // If we've reached this point, there are two possible scenarios.  If
        // the channel was in the active channels map as nil, then it was
        // loaded from disk and we need to send reestablish. Else, it was not
        // loaded from disk and we don't need to send reestablish as this is a
        // fresh channel.
        shouldReestablish := p.isLoadedFromDisk(chanID)

        chanOpts := c.ChanOpts
        if shouldReestablish {
                // If we have to do the reestablish dance for this channel,
                // ensure that we don't try to call InitRemoteMusigNonces twice
                // by calling SkipNonceInit.
                chanOpts = append(chanOpts, lnwallet.WithSkipNonceInit())
        }

        p.cfg.AuxLeafStore.WhenSome(func(s lnwallet.AuxLeafStore) {
                chanOpts = append(chanOpts, lnwallet.WithLeafStore(s))
        })
        p.cfg.AuxSigner.WhenSome(func(s lnwallet.AuxSigner) {
                chanOpts = append(chanOpts, lnwallet.WithAuxSigner(s))
        })
        p.cfg.AuxResolver.WhenSome(func(s lnwallet.AuxContractResolver) {
                chanOpts = append(chanOpts, lnwallet.WithAuxResolver(s))
        })

        // If not already active, we'll add this channel to the set of active
        // channels, so we can look it up later easily according to its channel
        // ID.
        lnChan, err := lnwallet.NewLightningChannel(
                p.cfg.Signer, c.OpenChannel, p.cfg.SigPool, chanOpts...,
        )
        if err != nil {
                return fmt.Errorf("unable to create LightningChannel: %w", err)
        }

        // Store the channel in the activeChannels map.
        p.activeChannels.Store(chanID, lnChan)

        p.log.Infof("New channel active ChannelPoint(%v) with peer", chanPoint)

        // Next, we'll assemble a ChannelLink along with the necessary items it
        // needs to function.
        chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(chanPoint)
        if err != nil {
                return fmt.Errorf("unable to subscribe to chain events: %w",
                        err)
        }

        // We'll query the channel DB for the new channel's initial forwarding
        // policies to determine the policy we start out with.
        initialPolicy, err := p.cfg.ChannelDB.GetInitialForwardingPolicy(chanID)
        if err != nil {
                return fmt.Errorf("unable to query for initial forwarding "+
                        "policy: %v", err)
        }

        // Create the link and add it to the switch.
        err = p.addLink(
                &chanPoint, lnChan, initialPolicy, chainEvents,
                shouldReestablish, fn.None[lnwire.Shutdown](),
        )
        if err != nil {
                return fmt.Errorf("can't register new channel link(%v) with "+
                        "peer", chanPoint)
        }

        isTaprootChan := c.ChanType.IsTaproot()

        // We're using the old co-op close, so we don't need to init the new RBF
        // chan closer. If this is a taproot channel, then we'll also fall
        // through, as we don't support this type yet w/ rbf close.
        if !p.rbfCoopCloseAllowed() || isTaprootChan {
                return nil
        }

        // Now that the link has been added above, we'll also init an RBF chan
        // closer for this channel, but only if the new close feature is
        // negotiated.
        //
        // Creating this here ensures that any shutdown messages sent will be
        // automatically routed by the msg router.
        if _, err := p.initRbfChanCloser(lnChan); err != nil {
                p.activeChanCloses.Delete(chanID)

                return fmt.Errorf("unable to init RBF chan closer for new "+
                        "chan: %w", err)
        }

        return nil
}

// handleNewActiveChannel handles a `newChannelMsg` request. Depending on
// whether we know this channel ID or not, we'll either add it to the
// `activeChannels` map or init the next revocation for it.
func (p *Brontide) handleNewActiveChannel(req *newChannelMsg) {
        newChan := req.channel
        chanPoint := newChan.FundingOutpoint
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

        // Only update RemoteNextRevocation if the channel is in the
        // activeChannels map and if we added the link to the switch. Only
        // active channels will be added to the switch.
        if p.isActiveChannel(chanID) {
                p.log.Infof("Already have ChannelPoint(%v), ignoring",
                        chanPoint)

                // Handle it and close the err chan on the request.
                close(req.err)

                // Update the next revocation point.
                err := p.updateNextRevocation(newChan.OpenChannel)
                if err != nil {
                        p.log.Errorf(err.Error())
                }

                return
        }

        // This is a new channel, we now add it to the map.
        if err := p.addActiveChannel(req.channel); err != nil {
                // Log and send back the error to the request.
                p.log.Errorf(err.Error())
                req.err <- err

                return
        }

        // Close the err chan if everything went fine.
        close(req.err)
}

// handleNewPendingChannel takes a `newChannelMsg` request and adds it to the
// `activeChannels` map with a nil value. This pending channel will be saved as
// it may become active in the future. Once active, the funding manager will
// send it again via `AddNewChannel`, and we'd handle the link creation there.
func (p *Brontide) handleNewPendingChannel(req *newChannelMsg) {
        defer close(req.err)

        chanID := req.channelID

        // If we already have this channel, something is wrong with the funding
        // flow as it will only be marked as active after `ChannelReady` is
        // handled. In this case, we will do nothing but log an error, just in
        // case this is a legit channel.
        if p.isActiveChannel(chanID) {
                p.log.Errorf("Channel(%v) is already active, ignoring "+
                        "pending channel request", chanID)

                return
        }

        // The channel has already been added, we will do nothing and return.
        if p.isPendingChannel(chanID) {
                p.log.Infof("Channel(%v) is already added, ignoring "+
                        "pending channel request", chanID)

                return
        }

        // This is a new channel, we now add it to the map `activeChannels`
        // with nil value and mark it as a newly added channel in
        // `addedChannels`.
        p.activeChannels.Store(chanID, nil)
        p.addedChannels.Store(chanID, struct{}{})
}
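
// The nil-value convention used above is worth spelling out: a key stored
// with a nil value in activeChannels marks a channel as known but still
// pending, while a non-nil value marks it active. A sketch with a plain
// map standing in for the concurrent map used here (illustrative only;
// isPendingSketch is not part of this file):
//
//      func isPendingSketch(
//              chans map[lnwire.ChannelID]*lnwallet.LightningChannel,
//              cid lnwire.ChannelID) bool {
//
//              lnChan, ok := chans[cid]
//              return ok && lnChan == nil
//      }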

// handleRemovePendingChannel takes a `newChannelMsg` request and removes it
// from the `activeChannels` map. The request will be ignored if the channel is
// considered active by Brontide. Noop if the channel ID cannot be found.
func (p *Brontide) handleRemovePendingChannel(req *newChannelMsg) {
        defer close(req.err)

        chanID := req.channelID

        // If we already have this channel, something is wrong with the funding
        // flow as it will only be marked as active after `ChannelReady` is
        // handled. In this case, we will log an error and exit.
        if p.isActiveChannel(chanID) {
                p.log.Errorf("Channel(%v) is active, ignoring remove request",
                        chanID)
                return
        }

        // The channel has not been added yet, we will log a warning as this
        // is an unexpected call from the funding manager.
        if !p.isPendingChannel(chanID) {
                p.log.Warnf("Channel(%v) not found, removing it anyway", chanID)
        }

        // Remove the record of this pending channel.
        p.activeChannels.Delete(chanID)
        p.addedChannels.Delete(chanID)
}

// sendLinkUpdateMsg sends a message that updates the channel to the
// channel's message stream.
func (p *Brontide) sendLinkUpdateMsg(cid lnwire.ChannelID, msg lnwire.Message) {
        p.log.Tracef("Sending link update msg=%v", msg.MsgType())

        chanStream, ok := p.activeMsgStreams[cid]
        if !ok {
                // If a stream hasn't yet been created, then we'll do so, add
                // it to the map, and finally start it.
                chanStream = newChanMsgStream(p, cid)
                p.activeMsgStreams[cid] = chanStream
                chanStream.Start()

                // Stop the stream when quit.
                go func() {
                        <-p.cg.Done()
                        chanStream.Stop()
                }()
        }

        // With the stream obtained, add the message to the stream so we can
        // continue processing messages.
        chanStream.AddMsg(msg)
}

// scaleTimeout multiplies the argument duration by a constant factor depending
// on various heuristics. Currently this is only used to check whether our peer
// appears to be connected over Tor and relaxes the timeout deadline. However,
// this is subject to change and should be treated as opaque.
func (p *Brontide) scaleTimeout(timeout time.Duration) time.Duration {
        if p.isTorConnection {
                return timeout * time.Duration(torTimeoutMultiplier)
        }

        return timeout
}
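
// A quick illustration of the scaling (illustrative only, and assuming a
// torTimeoutMultiplier of 3):
//
//      p.scaleTimeout(30 * time.Second) // 30s for a clearnet peer
//      p.scaleTimeout(30 * time.Second) // 90s when p.isTorConnection is true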

// CoopCloseUpdates is a struct used to communicate updates for an active close
// to the caller.
type CoopCloseUpdates struct {
        UpdateChan chan interface{}

        ErrChan chan error
}

// ChanHasRbfCoopCloser returns true if the channel as identified by the channel
// point has an active RBF chan closer.
func (p *Brontide) ChanHasRbfCoopCloser(chanPoint wire.OutPoint) bool {
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
        chanCloser, found := p.activeChanCloses.Load(chanID)
        if !found {
                return false
        }

        return chanCloser.IsRight()
}

// TriggerCoopCloseRbfBump attempts a new RBF co-op close bump given a channel
// point and the params needed to trigger the update. A channel used for
// updates, along with one used to communicate any errors, is returned via the
// CoopCloseUpdates struct. If RBF co-op close isn't permitted for the channel,
// an error is returned.
func (p *Brontide) TriggerCoopCloseRbfBump(ctx context.Context,
        chanPoint wire.OutPoint, feeRate chainfee.SatPerKWeight,
        deliveryScript lnwire.DeliveryAddress) (*CoopCloseUpdates, error) {

        // If RBF coop close isn't permitted, then we'll return an error.
        if !p.rbfCoopCloseAllowed() {
                return nil, fmt.Errorf("rbf coop close not enabled for " +
                        "channel")
        }

        closeUpdates := &CoopCloseUpdates{
                UpdateChan: make(chan interface{}, 1),
                ErrChan:    make(chan error, 1),
        }

        // We'll re-use the existing switch struct here, even though we're
        // bypassing the switch entirely.
        closeReq := htlcswitch.ChanClose{
                CloseType:      contractcourt.CloseRegular,
                ChanPoint:      &chanPoint,
                TargetFeePerKw: feeRate,
                DeliveryScript: deliveryScript,
                Updates:        closeUpdates.UpdateChan,
                Err:            closeUpdates.ErrChan,
                Ctx:            ctx,
        }

        err := p.startRbfChanCloser(newRPCShutdownInit(&closeReq), chanPoint)
        if err != nil {
                return nil, err
        }

        return closeUpdates, nil
}
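
// A hedged usage sketch for the RBF bump API above (illustrative only;
// ctx, chanPoint, feeRate and deliveryScript are assumed to come from the
// RPC caller):
//
//      updates, err := p.TriggerCoopCloseRbfBump(
//              ctx, chanPoint, feeRate, deliveryScript,
//      )
//      if err != nil {
//              return err
//      }
//
//      select {
//      case upd := <-updates.UpdateChan:
//              // Process the latest close update.
//              _ = upd
//      case err := <-updates.ErrChan:
//              return err
//      case <-ctx.Done():
//              return ctx.Err()
//      }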