/peer/brontide.go

package peer

import (
	"bytes"
	"container/list"
	"context"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/connmgr"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btclog/v2"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/buffer"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/channelnotifier"
	"github.com/lightningnetwork/lnd/contractcourt"
	"github.com/lightningnetwork/lnd/discovery"
	"github.com/lightningnetwork/lnd/feature"
	"github.com/lightningnetwork/lnd/fn/v2"
	"github.com/lightningnetwork/lnd/funding"
	graphdb "github.com/lightningnetwork/lnd/graph/db"
	"github.com/lightningnetwork/lnd/graph/db/models"
	"github.com/lightningnetwork/lnd/htlcswitch"
	"github.com/lightningnetwork/lnd/htlcswitch/hodl"
	"github.com/lightningnetwork/lnd/htlcswitch/hop"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/invoices"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/lnpeer"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnutils"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
	"github.com/lightningnetwork/lnd/lnwallet/chancloser"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/msgmux"
	"github.com/lightningnetwork/lnd/netann"
	"github.com/lightningnetwork/lnd/pool"
	"github.com/lightningnetwork/lnd/protofsm"
	"github.com/lightningnetwork/lnd/queue"
	"github.com/lightningnetwork/lnd/subscribe"
	"github.com/lightningnetwork/lnd/ticker"
	"github.com/lightningnetwork/lnd/tlv"
	"github.com/lightningnetwork/lnd/watchtower/wtclient"
)

const (
	// pingInterval is the interval at which ping messages are sent.
	pingInterval = 1 * time.Minute

	// pingTimeout is the amount of time we will wait for a pong response
	// before considering the peer to be unresponsive.
	//
	// This MUST be a smaller value than the pingInterval.
	pingTimeout = 30 * time.Second

	// idleTimeout is the duration of inactivity before we time out a peer.
	idleTimeout = 5 * time.Minute

	// writeMessageTimeout is the timeout used when writing a message to the
	// peer.
	writeMessageTimeout = 5 * time.Second

	// readMessageTimeout is the timeout used when reading a message from a
	// peer.
	readMessageTimeout = 5 * time.Second

	// handshakeTimeout is the timeout used when waiting for the peer's init
	// message.
	handshakeTimeout = 15 * time.Second

	// ErrorBufferSize is the number of historic peer errors that we store.
	ErrorBufferSize = 10

	// pongSizeCeiling is the upper bound on a uniformly distributed random
	// variable that we use for requesting pong responses. We don't use the
	// MaxPongBytes (upper bound accepted by the protocol) because it is
	// needlessly wasteful of precious Tor bandwidth for little to no gain.
	pongSizeCeiling = 4096

	// torTimeoutMultiplier is the scaling factor we use on network timeouts
	// for Tor peers.
	torTimeoutMultiplier = 3

	// msgStreamSize is the size of the message streams.
	msgStreamSize = 5
)
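
// A sketch of how torTimeoutMultiplier is applied. The scaleTimeout helper
// is defined later in this file; its exact form may differ slightly from
// this illustration:
//
//	func (p *Brontide) scaleTimeout(timeout time.Duration) time.Duration {
//		if p.isTorConnection {
//			return timeout * time.Duration(torTimeoutMultiplier)
//		}
//		return timeout
//	}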

var (
	// ErrChannelNotFound is an error returned when a channel is queried and
	// either the Brontide doesn't know of it, or the channel in question
	// is pending.
	ErrChannelNotFound = fmt.Errorf("channel not found")
)

// outgoingMsg packages an lnwire.Message to be sent out on the wire, along with
// a buffered channel which will be sent upon once the write is complete. This
// buffered channel acts as a semaphore to be used for synchronization purposes.
type outgoingMsg struct {
	priority bool
	msg      lnwire.Message
	errChan  chan error // MUST be buffered.
}
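
// A minimal sketch of how a sender waits on a queued write; the error
// channel is buffered so the writeHandler can always send the result
// without blocking (queueMsg is defined elsewhere in this file):
//
//	errChan := make(chan error, 1)
//	p.queueMsg(msg, errChan)
//	if err := <-errChan; err != nil {
//		// Handle the failed write.
//	}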

// newChannelMsg packages a channeldb.OpenChannel with a channel that allows
// the receiver of the request to report when the channel creation process has
// completed.
type newChannelMsg struct {
	// channel is used when the pending channel becomes active.
	channel *lnpeer.NewChannel

	// channelID is used when there's a new pending channel.
	channelID lnwire.ChannelID

	err chan error
}

// customMsg packages a custom wire message together with the serialized
// public key of the peer it relates to.
type customMsg struct {
	peer [33]byte
	msg  lnwire.Custom
}

// closeMsg is a wrapper struct around any wire messages that deal with the
// cooperative channel closure negotiation process. This struct includes the
// raw channel ID targeted along with the original message.
type closeMsg struct {
	cid lnwire.ChannelID
	msg lnwire.Message
}

// PendingUpdate describes the pending state of a closing channel.
type PendingUpdate struct {
	// Txid is the txid of the closing transaction.
	Txid []byte

	// OutputIndex is the output index of our output in the closing
	// transaction.
	OutputIndex uint32

	// FeePerVbyte is an optional field that is set only when the new RBF
	// coop close flow is used. It indicates the new closing fee rate on
	// the closing transaction.
	FeePerVbyte fn.Option[chainfee.SatPerVByte]

	// IsLocalCloseTx is an optional field that indicates if this update is
	// sent for our local close txn, or the close txn of the remote party.
	// This is only set if the new RBF coop close flow is used.
	IsLocalCloseTx fn.Option[bool]
}

// ChannelCloseUpdate contains the outcome of the close channel operation.
type ChannelCloseUpdate struct {
	ClosingTxid []byte
	Success     bool

	// LocalCloseOutput is an optional, additional output on the closing
	// transaction that the local party should be paid to. This will only be
	// populated if the local balance isn't dust.
	LocalCloseOutput fn.Option[chancloser.CloseOutput]

	// RemoteCloseOutput is an optional, additional output on the closing
	// transaction that the remote party should be paid to. This will only
	// be populated if the remote balance isn't dust.
	RemoteCloseOutput fn.Option[chancloser.CloseOutput]

	// AuxOutputs is an optional set of additional outputs that might be
	// included in the closing transaction. These are used for custom
	// channel types.
	AuxOutputs fn.Option[chancloser.AuxCloseOutputs]
}

// TimestampedError is a timestamped error that is used to store the most recent
// errors we have experienced with our peers.
type TimestampedError struct {
	Error     error
	Timestamp time.Time
}

// Config defines configuration fields that are necessary for a peer object
// to function.
type Config struct {
	// Conn is the underlying network connection for this peer.
	Conn MessageConn

	// ConnReq stores information related to the persistent connection request
	// for this peer.
	ConnReq *connmgr.ConnReq

	// PubKeyBytes is the serialized, compressed public key of this peer.
	PubKeyBytes [33]byte

	// Addr is the network address of the peer.
	Addr *lnwire.NetAddress

	// Inbound indicates whether or not the peer is an inbound peer.
	Inbound bool

	// Features is the set of features that we advertise to the remote party.
	Features *lnwire.FeatureVector

	// LegacyFeatures is the set of features that we advertise to the remote
	// peer for backwards compatibility. Nodes that have not implemented
	// flat features will still be able to read our feature bits from the
	// legacy global field, but we will also advertise everything in the
	// default features field.
	LegacyFeatures *lnwire.FeatureVector

	// OutgoingCltvRejectDelta defines the number of blocks before expiry of
	// an htlc where we don't offer it anymore.
	OutgoingCltvRejectDelta uint32

	// ChanActiveTimeout specifies the duration the peer will wait to request
	// a channel reenable, beginning from the time the peer was started.
	ChanActiveTimeout time.Duration

	// ErrorBuffer stores a set of errors related to a peer. It contains error
	// messages that our peer has recently sent us over the wire and records of
	// unknown messages that were sent to us so that we can have a full track
	// record of the communication errors we have had with our peer. If we
	// choose to disconnect from a peer, it also stores the reason we had for
	// disconnecting.
	ErrorBuffer *queue.CircularBuffer

	// WritePool is the task pool that manages reuse of write buffers. Write
	// tasks are submitted to the pool in order to conserve the total number of
	// write buffers allocated at any one time, and decouple write buffer
	// allocation from the peer life cycle.
	WritePool *pool.Write

	// ReadPool is the task pool that manages reuse of read buffers.
	ReadPool *pool.Read

	// Switch is a pointer to the htlcswitch. It is used to setup, get, and
	// tear-down ChannelLinks.
	Switch messageSwitch

	// InterceptSwitch is a pointer to the InterceptableSwitch, a wrapper around
	// the regular Switch. We only export it here to pass ForwardPackets to the
	// ChannelLinkConfig.
	InterceptSwitch *htlcswitch.InterceptableSwitch

	// ChannelDB is used to fetch opened channels, and closed channels.
	ChannelDB *channeldb.ChannelStateDB

	// ChannelGraph is a pointer to the channel graph which is used to
	// query information about the set of known active channels.
	ChannelGraph *graphdb.ChannelGraph

	// ChainArb is used to subscribe to channel events, update contract signals,
	// and force close channels.
	ChainArb *contractcourt.ChainArbitrator

	// AuthGossiper is needed so that the Brontide impl can register with the
	// gossiper and process remote channel announcements.
	AuthGossiper *discovery.AuthenticatedGossiper

	// ChanStatusMgr is used to set or un-set the disabled bit in channel
	// updates.
	ChanStatusMgr *netann.ChanStatusManager

	// ChainIO is used to retrieve the best block.
	ChainIO lnwallet.BlockChainIO

	// FeeEstimator is used to compute our target ideal fee-per-kw when
	// initializing the coop close process.
	FeeEstimator chainfee.Estimator

	// Signer is used when creating *lnwallet.LightningChannel instances.
	Signer input.Signer

	// SigPool is used when creating *lnwallet.LightningChannel instances.
	SigPool *lnwallet.SigPool

	// Wallet is used to publish transactions and generate delivery
	// scripts during the coop close process.
	Wallet *lnwallet.LightningWallet

	// ChainNotifier is used to receive confirmations of a coop close
	// transaction.
	ChainNotifier chainntnfs.ChainNotifier

	// BestBlockView is used to efficiently query for up-to-date
	// blockchain state information.
	BestBlockView chainntnfs.BestBlockView

	// RoutingPolicy is used to set the forwarding policy for links created by
	// the Brontide.
	RoutingPolicy models.ForwardingPolicy

	// Sphinx is used when setting up ChannelLinks so they can decode sphinx
	// onion blobs.
	Sphinx *hop.OnionProcessor

	// WitnessBeacon is used when setting up ChannelLinks so they can add any
	// preimages that they learn.
	WitnessBeacon contractcourt.WitnessBeacon

	// Invoices is passed to the ChannelLink on creation and handles all
	// invoice-related logic.
	Invoices *invoices.InvoiceRegistry

	// ChannelNotifier is used by the link to notify other sub-systems about
	// channel-related events and by the Brontide to subscribe to
	// ActiveLinkEvents.
	ChannelNotifier *channelnotifier.ChannelNotifier

	// HtlcNotifier is used when creating a ChannelLink.
	HtlcNotifier *htlcswitch.HtlcNotifier

	// TowerClient is used to backup revoked states.
	TowerClient wtclient.ClientManager

	// DisconnectPeer is used to disconnect this peer if the cooperative close
	// process fails.
	DisconnectPeer func(*btcec.PublicKey) error

	// GenNodeAnnouncement is used to send our node announcement to the remote
	// on startup.
	GenNodeAnnouncement func(...netann.NodeAnnModifier) (
		lnwire.NodeAnnouncement, error)

	// PrunePersistentPeerConnection is used to remove all internal state
	// related to this peer in the server.
	PrunePersistentPeerConnection func([33]byte)

	// FetchLastChanUpdate fetches our latest channel update for a target
	// channel.
	FetchLastChanUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate1,
		error)

	// FundingManager is an implementation of the funding.Controller interface.
	FundingManager funding.Controller

	// Hodl is used when creating ChannelLinks to specify HodlFlags as
	// breakpoints in dev builds.
	Hodl *hodl.Config

	// UnsafeReplay is used when creating ChannelLinks to specify whether or
	// not to replay adds on its commitment tx.
	UnsafeReplay bool

	// MaxOutgoingCltvExpiry is used when creating ChannelLinks and is the max
	// number of blocks that funds could be locked up for when forwarding
	// payments.
	MaxOutgoingCltvExpiry uint32

	// MaxChannelFeeAllocation is used when creating ChannelLinks and is the
	// maximum percentage of total funds that can be allocated to a channel's
	// commitment fee. This only applies for the initiator of the channel.
	MaxChannelFeeAllocation float64

	// MaxAnchorsCommitFeeRate is the maximum fee rate we'll use as an
	// initiator for anchor channel commitments.
	MaxAnchorsCommitFeeRate chainfee.SatPerKWeight

	// CoopCloseTargetConfs is the confirmation target that will be used
	// to estimate the fee rate to use during a cooperative channel
	// closure initiated by the remote peer.
	CoopCloseTargetConfs uint32

	// ServerPubKey is the serialized, compressed public key of our lnd node.
	// It is used to determine which policy (channel edge) to pass to the
	// ChannelLink.
	ServerPubKey [33]byte

	// ChannelCommitInterval is the maximum time that is allowed to pass between
	// receiving a channel state update and signing the next commitment.
	// Setting this to a longer duration allows for more efficient channel
	// operations at the cost of latency.
	ChannelCommitInterval time.Duration

	// PendingCommitInterval is the maximum time that is allowed to pass
	// while waiting for the remote party to revoke a locally initiated
	// commitment state. Set this to a longer duration if a slow response
	// is expected from the remote party or a large number of payments are
	// attempted at the same time.
	PendingCommitInterval time.Duration

	// ChannelCommitBatchSize is the maximum number of channel state updates
	// that is accumulated before signing a new commitment.
	ChannelCommitBatchSize uint32

	// HandleCustomMessage is called whenever a custom message is received
	// from the peer.
	HandleCustomMessage func(peer [33]byte, msg *lnwire.Custom) error

	// GetAliases is passed to created links so the Switch and link can be
	// aware of the channel's aliases.
	GetAliases func(base lnwire.ShortChannelID) []lnwire.ShortChannelID

	// RequestAlias allows the Brontide struct to request an alias to send
	// to the peer.
	RequestAlias func() (lnwire.ShortChannelID, error)

	// AddLocalAlias persists an alias to an underlying alias store.
	AddLocalAlias func(alias, base lnwire.ShortChannelID,
		gossip, liveUpdate bool) error

	// AuxLeafStore is an optional store that can be used to store auxiliary
	// leaves for certain custom channel types.
	AuxLeafStore fn.Option[lnwallet.AuxLeafStore]

	// AuxSigner is an optional signer that can be used to sign auxiliary
	// leaves for certain custom channel types.
	AuxSigner fn.Option[lnwallet.AuxSigner]

	// AuxResolver is an optional interface that can be used to modify the
	// way contracts are resolved.
	AuxResolver fn.Option[lnwallet.AuxContractResolver]

	// AuxTrafficShaper is an optional auxiliary traffic shaper that can be
	// used to manage the bandwidth of peer links.
	AuxTrafficShaper fn.Option[htlcswitch.AuxTrafficShaper]

	// PongBuf is a slice we'll reuse instead of allocating memory on the
	// heap. Since only reads will occur and no writes, there is no need
	// for any synchronization primitives. As a result, it's safe to share
	// this across multiple Peer struct instances.
	PongBuf []byte

	// DisallowRouteBlinding adds the option to disable forwarding payments
	// in blinded routes by failing back any blinding-related payloads as
	// if they were invalid.
	DisallowRouteBlinding bool

	// DisallowQuiescence is a flag that indicates whether the Brontide
	// should have the quiescence feature disabled.
	DisallowQuiescence bool

	// MaxFeeExposure limits the number of outstanding fees in a channel.
	// This value will be passed to created links.
	MaxFeeExposure lnwire.MilliSatoshi

	// MsgRouter is an optional instance of the main message router that
	// the peer will use. If None, then a new default version will be used
	// in place.
	MsgRouter fn.Option[msgmux.Router]

	// AuxChanCloser is an optional instance of an abstraction that can be
	// used to modify the way the co-op close transaction is constructed.
	AuxChanCloser fn.Option[chancloser.AuxChanCloser]

	// ShouldFwdExpEndorsement is a closure that indicates whether
	// experimental endorsement signals should be set.
	ShouldFwdExpEndorsement func() bool

	// Quit is the server's quit channel. If this is closed, we halt operation.
	Quit chan struct{}
}

// chanCloserFsm is a union-like type that can hold the two versions of co-op
// close we support: negotiation, and RBF based.
//
// TODO(roasbeef): rename to chancloser.Negotiator and chancloser.RBF?
type chanCloserFsm = fn.Either[*chancloser.ChanCloser, *chancloser.RbfChanCloser] //nolint:ll

// makeNegotiateCloser creates a new negotiate closer from a
// chancloser.ChanCloser.
func makeNegotiateCloser(chanCloser *chancloser.ChanCloser) chanCloserFsm {
	return fn.NewLeft[*chancloser.ChanCloser, *chancloser.RbfChanCloser](
		chanCloser,
	)
}

// makeRbfCloser creates a new RBF closer from a chancloser.RbfChanCloser.
func makeRbfCloser(rbfCloser *chancloser.RbfChanCloser) chanCloserFsm {
	return fn.NewRight[*chancloser.ChanCloser](
		rbfCloser,
	)
}
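
// A chanCloserFsm is consumed by matching on whichever side of the union is
// populated. A minimal sketch, assuming fn.Either exposes WhenLeft/WhenRight
// style handlers (illustrative only; the exact fn API may differ):
//
//	closer := makeNegotiateCloser(chanCloser)
//	closer.WhenLeft(func(c *chancloser.ChanCloser) {
//		// Drive the legacy negotiation state machine.
//	})
//	closer.WhenRight(func(c *chancloser.RbfChanCloser) {
//		// Drive the new RBF close state machine.
//	})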

// Brontide is an active peer on the Lightning Network. This struct is responsible
// for managing any channel state related to this peer. To do so, it has
// several helper goroutines to handle events such as HTLC timeouts, new
// funding workflow, and detecting an uncooperative closure of any active
// channels.
type Brontide struct {
	// MUST be used atomically.
	started    int32
	disconnect int32

	// MUST be used atomically.
	bytesReceived uint64
	bytesSent     uint64

	// isTorConnection is a flag that indicates whether or not we believe
	// the remote peer is a tor connection. It is not always possible to
	// know this with certainty but we have heuristics we use that should
	// catch most cases.
	//
	// NOTE: We judge the tor-ness of a connection by if the remote peer has
	// ".onion" in the address OR if it's connected over localhost.
	// This will miss cases where our peer is connected to our clearnet
	// address over the tor network (via exit nodes). It will also misjudge
	// actual localhost connections as tor. We need to include this because
	// inbound connections to our tor address will appear to come from the
	// local socks5 proxy. This heuristic is only used to expand the timeout
	// window for peers so it is OK to misjudge this. If you use this field
	// for any other purpose you should seriously consider whether or not
	// this heuristic is good enough for your use case.
	isTorConnection bool

	pingManager *PingManager

	// lastPingPayload stores an unsafe pointer wrapped as an atomic
	// variable which points to the last payload the remote party sent us
	// as their ping.
	//
	// MUST be used atomically.
	lastPingPayload atomic.Value

	cfg Config

	// activeSignal when closed signals that the peer is now active and
	// ready to process messages.
	activeSignal chan struct{}

	// startTime is the time this peer connection was successfully established.
	// It will be zero for peers that did not successfully call Start().
	startTime time.Time

	// sendQueue is the channel which is used to queue outgoing messages to be
	// written onto the wire. Note that this channel is unbuffered.
	sendQueue chan outgoingMsg

	// outgoingQueue is a buffered channel which allows second/third party
	// objects to queue messages to be sent out on the wire.
	outgoingQueue chan outgoingMsg

	// activeChannels is a map which stores the state machines of all
	// active channels. Channels are indexed into the map by the txid of
	// the funding transaction which opened the channel.
	//
	// NOTE: On startup, pending channels are stored as nil in this map.
	// Confirmed channels have channel data populated in the map. This means
	// that accesses to this map should nil-check the LightningChannel to
	// see if this is a pending channel or not. The tradeoff here is either
	// having two maps everywhere (one for pending, one for confirmed chans)
	// or having an extra nil-check per access.
	activeChannels *lnutils.SyncMap[
		lnwire.ChannelID, *lnwallet.LightningChannel]

	// addedChannels tracks any new channels opened during this peer's
	// lifecycle. We use this to filter out these new channels when the time
	// comes to request a reenable for active channels, since they will have
	// waited a shorter duration.
	addedChannels *lnutils.SyncMap[lnwire.ChannelID, struct{}]

	// newActiveChannel is used by the fundingManager to send fully opened
	// channels to the source peer which handled the funding workflow.
	newActiveChannel chan *newChannelMsg

	// newPendingChannel is used by the fundingManager to send pending open
	// channels to the source peer which handled the funding workflow.
	newPendingChannel chan *newChannelMsg

	// removePendingChannel is used by the fundingManager to cancel pending
	// open channels to the source peer when the funding flow is failed.
	removePendingChannel chan *newChannelMsg

	// activeMsgStreams is a map from channel id to the channel streams that
	// proxy messages to individual, active links.
	activeMsgStreams map[lnwire.ChannelID]*msgStream

	// activeChanCloses is a map that keeps track of all the active
	// cooperative channel closures. Any channel closing messages are directed
	// to one of these active state machines. Once the channel has been closed,
	// the state machine will be deleted from the map.
	activeChanCloses *lnutils.SyncMap[lnwire.ChannelID, chanCloserFsm]

	// localCloseChanReqs is a channel in which any local requests to close
	// a particular channel are sent over.
	localCloseChanReqs chan *htlcswitch.ChanClose

	// linkFailures receives all reported channel failures from the switch,
	// and instructs the channelManager to clean remaining channel state.
	linkFailures chan linkFailureReport

	// chanCloseMsgs is a channel that any message related to channel
	// closures are sent over. This includes lnwire.Shutdown messages as
	// well as lnwire.ClosingSigned messages.
	chanCloseMsgs chan *closeMsg

	// remoteFeatures is the feature vector received from the peer during
	// the connection handshake.
	remoteFeatures *lnwire.FeatureVector

	// resentChanSyncMsg is a set that keeps track of which channels we
	// have re-sent channel reestablishment messages for. This is done to
	// avoid getting into a loop where both peers will respond to the
	// other peer's chansync message with its own over and over again.
	resentChanSyncMsg map[lnwire.ChannelID]struct{}

	// channelEventClient is the channel event subscription client that's
	// used to assist retry enabling the channels. This client is only
	// created when the reenableTimeout is no greater than 1 minute. Once
	// created, it is canceled once the reenabling has been finished.
	//
	// NOTE: we choose to create the client conditionally to avoid
	// potentially holding lots of un-consumed events.
	channelEventClient *subscribe.Client

	// msgRouter is an instance of the msgmux.Router which is used to send
	// off new wire messages for handling.
	msgRouter fn.Option[msgmux.Router]

	// globalMsgRouter is a flag that indicates whether we have a global
	// msg router. If so, then we don't worry about stopping the msg router
	// when a peer disconnects.
	globalMsgRouter bool

	// startReady is closed once Start has completed, signalling to other
	// goroutines that they can safely proceed with tear-down or other
	// state changes.
	startReady chan struct{}

	// cg is a helper that encapsulates a wait group and quit channel and
	// allows contexts that either block or cancel on those depending on
	// the use case.
	cg *fn.ContextGuard

	// log is a peer-specific logging instance.
	log btclog.Logger
}
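
// The atomic fields above are only safe to touch through the sync/atomic
// package, e.g. (illustrative):
//
//	atomic.AddUint64(&p.bytesReceived, uint64(n))
//	sent := atomic.LoadUint64(&p.bytesSent)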

// A compile-time check to ensure that Brontide satisfies the lnpeer.Peer
// interface.
var _ lnpeer.Peer = (*Brontide)(nil)

// NewBrontide creates a new Brontide from a peer.Config struct.
func NewBrontide(cfg Config) *Brontide {
	logPrefix := fmt.Sprintf("Peer(%x):", cfg.PubKeyBytes)

	// We have a global message router if one was passed in via the config.
	// In this case, we don't need to attempt to tear it down when the peer
	// is stopped.
	globalMsgRouter := cfg.MsgRouter.IsSome()

	// We'll either use the msg router instance passed in, or create a new
	// blank instance.
	msgRouter := cfg.MsgRouter.Alt(fn.Some[msgmux.Router](
		msgmux.NewMultiMsgRouter(),
	))

	p := &Brontide{
		cfg:           cfg,
		activeSignal:  make(chan struct{}),
		sendQueue:     make(chan outgoingMsg),
		outgoingQueue: make(chan outgoingMsg),
		addedChannels: &lnutils.SyncMap[lnwire.ChannelID, struct{}]{},
		activeChannels: &lnutils.SyncMap[
			lnwire.ChannelID, *lnwallet.LightningChannel,
		]{},
		newActiveChannel:     make(chan *newChannelMsg, 1),
		newPendingChannel:    make(chan *newChannelMsg, 1),
		removePendingChannel: make(chan *newChannelMsg),

		activeMsgStreams: make(map[lnwire.ChannelID]*msgStream),
		activeChanCloses: &lnutils.SyncMap[
			lnwire.ChannelID, chanCloserFsm,
		]{},
		localCloseChanReqs: make(chan *htlcswitch.ChanClose),
		linkFailures:       make(chan linkFailureReport),
		chanCloseMsgs:      make(chan *closeMsg),
		resentChanSyncMsg:  make(map[lnwire.ChannelID]struct{}),
		startReady:         make(chan struct{}),
		log:                peerLog.WithPrefix(logPrefix),
		msgRouter:          msgRouter,
		globalMsgRouter:    globalMsgRouter,
		cg:                 fn.NewContextGuard(),
	}

	if cfg.Conn != nil && cfg.Conn.RemoteAddr() != nil {
		remoteAddr := cfg.Conn.RemoteAddr().String()
		p.isTorConnection = strings.Contains(remoteAddr, ".onion") ||
			strings.Contains(remoteAddr, "127.0.0.1")
	}

	var (
		lastBlockHeader           *wire.BlockHeader
		lastSerializedBlockHeader [wire.MaxBlockHeaderPayload]byte
	)
	newPingPayload := func() []byte {
		// We query the BestBlockHeader from our BestBlockView each time
		// this is called, and update our serialized block header if
		// they differ. Over time, we'll use this to disseminate the
		// latest block header between all our peers, which can later be
		// used to cross-check our own view of the network to mitigate
		// various types of eclipse attacks.
		header, err := p.cfg.BestBlockView.BestBlockHeader()
		if err != nil && header == lastBlockHeader {
			return lastSerializedBlockHeader[:]
		}

		buf := bytes.NewBuffer(lastSerializedBlockHeader[0:0])
		err = header.Serialize(buf)
		if err == nil {
			lastBlockHeader = header
		} else {
			p.log.Warn("unable to serialize current block " +
				"header for ping payload generation. " +
				"This should be impossible and means " +
				"there is an implementation bug.")
		}

		return lastSerializedBlockHeader[:]
	}

	// TODO(roasbeef): make dynamic in order to create fake cover traffic.
	//
	// NOTE(proofofkeags): this was changed to be dynamic to allow better
	// pong identification, however, more thought is needed to make this
	// actually usable as a traffic decoy.
	randPongSize := func() uint16 {
		return uint16(
			// We don't need cryptographic randomness here.
			/* #nosec */
			rand.Intn(pongSizeCeiling) + 1,
		)
	}

	p.pingManager = NewPingManager(&PingManagerConfig{
		NewPingPayload:   newPingPayload,
		NewPongSize:      randPongSize,
		IntervalDuration: p.scaleTimeout(pingInterval),
		TimeoutDuration:  p.scaleTimeout(pingTimeout),
		SendPing: func(ping *lnwire.Ping) {
			p.queueMsg(ping, nil)
		},
		OnPongFailure: func(err error) {
			eStr := "pong response failure for %s: %v " +
				"-- disconnecting"
			p.log.Warnf(eStr, p, err)
			go p.Disconnect(fmt.Errorf(eStr, p, err))
		},
	})

	return p
}
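
// Illustrative usage (sketch, error handling elided): construction is cheap
// and synchronous, while Start performs the init handshake and spins up the
// helper goroutines.
//
//	p := NewBrontide(cfg)
//	if err := p.Start(); err != nil {
//		// The peer failed the handshake or startup.
//	}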

// Start starts all helper goroutines the peer needs for normal operations. In
// the case this peer has already been started, then this function is a noop.
func (p *Brontide) Start() error {
	if atomic.AddInt32(&p.started, 1) != 1 {
		return nil
	}

	// Once we've finished starting up the peer, we'll signal to other
	// goroutines that they can move forward to tear down the peer, or
	// carry out other relevant changes.
	defer close(p.startReady)

	p.log.Tracef("starting with conn[%v->%v]",
		p.cfg.Conn.LocalAddr(), p.cfg.Conn.RemoteAddr())

	// Fetch and then load all the active channels we have with this remote
	// peer from the database.
	activeChans, err := p.cfg.ChannelDB.FetchOpenChannels(
		p.cfg.Addr.IdentityKey,
	)
	if err != nil {
		p.log.Errorf("Unable to fetch active chans "+
			"for peer: %v", err)
		return err
	}

	if len(activeChans) == 0 {
		go p.cfg.PrunePersistentPeerConnection(p.cfg.PubKeyBytes)
	}

	// Quickly check if we have any existing legacy channels with this
	// peer.
	haveLegacyChan := false
	for _, c := range activeChans {
		if c.ChanType.IsTweakless() {
			continue
		}

		haveLegacyChan = true
		break
	}

	// Exchange local and global features; the init message should be the
	// very first message exchanged between two nodes.
	if err := p.sendInitMsg(haveLegacyChan); err != nil {
		return fmt.Errorf("unable to send init msg: %w", err)
	}

	// Before we launch any of the helper goroutines off the peer struct,
	// we'll first ensure proper adherence to the p2p protocol. The init
	// message MUST be sent before any other message.
	readErr := make(chan error, 1)
	msgChan := make(chan lnwire.Message, 1)
	p.cg.WgAdd(1)
	go func() {
		defer p.cg.WgDone()

		msg, err := p.readNextMessage()
		if err != nil {
			readErr <- err
			msgChan <- nil
			return
		}
		readErr <- nil
		msgChan <- msg
	}()

	select {
	// In order to avoid blocking indefinitely, we'll give the other peer
	// an upper timeout to respond before we bail out early.
	case <-time.After(handshakeTimeout):
		return fmt.Errorf("peer did not complete handshake within %v",
			handshakeTimeout)
	case err := <-readErr:
		if err != nil {
			return fmt.Errorf("unable to read init msg: %w", err)
		}
	}

	// Once the init message arrives, we can parse it so we can figure out
	// the negotiation of features for this session.
	msg := <-msgChan
	if msg, ok := msg.(*lnwire.Init); ok {
		if err := p.handleInitMsg(msg); err != nil {
			p.storeError(err)
			return err
		}
	} else {
		return errors.New("very first message between nodes " +
			"must be init message")
	}

	// Next, load all the active channels we have with this peer,
	// registering them with the switch and launching the necessary
	// goroutines required to operate them.
	p.log.Debugf("Loaded %v active channels from database",
		len(activeChans))

	// Conditionally subscribe to channel events before loading channels so
	// we won't miss events. This subscription is used to listen to active
	// channel events when reenabling channels. Once the reenabling process
	// is finished, this subscription will be canceled.
	//
	// NOTE: ChannelNotifier must be started before subscribing events
	// otherwise we'd panic here.
	if err := p.attachChannelEventSubscription(); err != nil {
		return err
	}

	// Register the message router now as we may need to register some
	// endpoints while loading the channels below.
	p.msgRouter.WhenSome(func(router msgmux.Router) {
		router.Start(context.Background())
	})

	msgs, err := p.loadActiveChannels(activeChans)
	if err != nil {
		return fmt.Errorf("unable to load channels: %w", err)
	}

	p.startTime = time.Now()

	// Before launching the writeHandler goroutine, we send any channel
	// sync messages that must be resent for borked channels. We do this to
	// avoid data races with WriteMessage & Flush calls.
	if len(msgs) > 0 {
		p.log.Infof("Sending %d channel sync messages to peer after "+
			"loading active channels", len(msgs))

		// Send the messages directly via writeMessage and bypass the
		// writeHandler goroutine.
		for _, msg := range msgs {
			if err := p.writeMessage(msg); err != nil {
				return fmt.Errorf("unable to send "+
					"reestablish msg: %v", err)
			}
		}
	}

	err = p.pingManager.Start()
	if err != nil {
		return fmt.Errorf("could not start ping manager: %w", err)
	}

	p.cg.WgAdd(4)
	go p.queueHandler()
	go p.writeHandler()
	go p.channelManager()
	go p.readHandler()

	// Signal to any external processes that the peer is now active.
	close(p.activeSignal)

	// Node announcements don't propagate very well throughout the network
	// as there isn't a way to efficiently query for them through their
	// timestamp, mostly affecting nodes that were offline during the time
	// of broadcast. We'll resend our node announcement to the remote peer
	// as a best-effort delivery such that it can also propagate to their
	// peers. To ensure they can successfully process it in most cases,
	// we'll only resend it as long as we have at least one confirmed
	// advertised channel with the remote peer.
	//
	// TODO(wilmer): Remove this once we're able to query for node
	// announcements through their timestamps.
	p.cg.WgAdd(2)
	go p.maybeSendNodeAnn(activeChans)
	go p.maybeSendChannelUpdates()

	return nil
}

// initGossipSync initializes either a gossip syncer or an initial routing
// dump, depending on the negotiated synchronization method.
func (p *Brontide) initGossipSync() {
	// If the remote peer knows of the new gossip queries feature, then
	// we'll create a new gossipSyncer in the AuthenticatedGossiper for it.
	if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) {
		p.log.Info("Negotiated chan series queries")

		if p.cfg.AuthGossiper == nil {
			// This should only ever be hit in the unit tests.
			p.log.Warn("No AuthGossiper configured. Abandoning " +
				"gossip sync.")
			return
		}

		// Register the peer's gossip syncer with the gossiper.
		// This blocks synchronously to ensure the gossip syncer is
		// registered with the gossiper before attempting to read
		// messages from the remote peer.
		//
		// TODO(wilmer): Only sync updates from non-channel peers. This
		// requires an improved version of the current network
		// bootstrapper to ensure we can find and connect to non-channel
		// peers.
		p.cfg.AuthGossiper.InitSyncState(p)
	}
}

// taprootShutdownAllowed returns true if both parties have negotiated the
// shutdown-any-segwit feature.
func (p *Brontide) taprootShutdownAllowed() bool {
	return p.RemoteFeatures().HasFeature(lnwire.ShutdownAnySegwitOptional) &&
		p.LocalFeatures().HasFeature(lnwire.ShutdownAnySegwitOptional)
}

// rbfCoopCloseAllowed returns true if both parties have negotiated the new RBF
// coop close feature.
func (p *Brontide) rbfCoopCloseAllowed() bool {
	return p.RemoteFeatures().HasFeature(
		lnwire.RbfCoopCloseOptionalStaging,
	) && p.LocalFeatures().HasFeature(
		lnwire.RbfCoopCloseOptionalStaging,
	)
}
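
// Both helpers above follow the same negotiation pattern: a feature is only
// in effect if both sides advertise it. A generic sketch (bothHaveFeature is
// a hypothetical helper, not part of this file):
//
//	func (p *Brontide) bothHaveFeature(bit lnwire.FeatureBit) bool {
//		return p.RemoteFeatures().HasFeature(bit) &&
//			p.LocalFeatures().HasFeature(bit)
//	}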

// QuitSignal is a method that should return a channel which will be sent upon
// or closed once the backing peer exits. This allows callers using the
// interface to cancel any processing in the event the backing implementation
// exits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) QuitSignal() <-chan struct{} {
	return p.cg.Done()
}
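
// Callers typically select on QuitSignal to abandon work once the peer
// exits. An illustrative sketch (incoming and process are hypothetical
// stand-ins):
//
//	select {
//	case <-p.QuitSignal():
//		return // Peer exited; cancel any in-flight processing.
//	case msg := <-incoming:
//		process(msg)
//	}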

// addrWithInternalKey takes a delivery script, then attempts to supplement it
// with information related to the internal key for the addr, but only if it's
// a taproot addr.
func (p *Brontide) addrWithInternalKey(
	deliveryScript []byte) (*chancloser.DeliveryAddrWithKey, error) {

	// Currently, custom channels cannot be created with external upfront
	// shutdown addresses, so this shouldn't be an issue. We only require
	// the internal key for taproot addresses to be able to provide a
	// non-inclusion proof of any scripts.
	internalKeyDesc, err := lnwallet.InternalKeyForAddr(
		p.cfg.Wallet, &p.cfg.Wallet.Cfg.NetParams, deliveryScript,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch internal key: %w", err)
	}

	return &chancloser.DeliveryAddrWithKey{
		DeliveryAddress: deliveryScript,
		InternalKey: fn.MapOption(
			func(desc keychain.KeyDescriptor) btcec.PublicKey {
				return *desc.PubKey
			},
		)(internalKeyDesc),
	}, nil
}
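
// fn.MapOption above lifts a plain function over an fn.Option: when
// internalKeyDesc is None (a non-taproot address), the mapped result is also
// None and no internal key is attached. Conceptually (sketch):
//
//	MapOption(f)(None)    == None
//	MapOption(f)(Some(x)) == Some(f(x))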

// loadActiveChannels creates indexes within the peer for tracking all active
// channels returned by the database. It returns a slice of channel reestablish
// messages that should be sent to the peer immediately, in case we have borked
// channels that haven't been closed yet.
func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
	[]lnwire.Message, error) {

	// Return a slice of messages to send to the peers in case the channel
	// cannot be loaded normally.
	var msgs []lnwire.Message

	scidAliasNegotiated := p.hasNegotiatedScidAlias()

	for _, dbChan := range chans {
		hasScidFeature := dbChan.ChanType.HasScidAliasFeature()
		if scidAliasNegotiated && !hasScidFeature {
			// We'll request and store an alias, making sure that a
			// gossiper mapping is not created for the alias to the
			// real SCID. This is done because the peer and funding
			// manager are not aware of each other's states and if
			// we did not do this, we would accept alias channel
			// updates after 6 confirmations, which would be buggy.
			// We'll queue a channel_ready message with the new
			// alias. This should technically be done *after* the
			// reestablish, but this behavior is pre-existing since
			// the funding manager may already queue a
			// channel_ready before the channel_reestablish.
			if !dbChan.IsPending {
				aliasScid, err := p.cfg.RequestAlias()
				if err != nil {
					return nil, err
				}

				err = p.cfg.AddLocalAlias(
					aliasScid, dbChan.ShortChanID(), false,
					false,
				)
				if err != nil {
					return nil, err
				}

				chanID := lnwire.NewChanIDFromOutPoint(
					dbChan.FundingOutpoint,
				)

				// Fetch the second commitment point to send in
				// the channel_ready message.
				second, err := dbChan.SecondCommitmentPoint()
				if err != nil {
					return nil, err
				}

				channelReadyMsg := lnwire.NewChannelReady(
					chanID, second,
				)
				channelReadyMsg.AliasScid = &aliasScid

				msgs = append(msgs, channelReadyMsg)
			}

			// If we've negotiated the option-scid-alias feature
			// and this channel does not have ScidAliasFeature set
			// to true due to an upgrade where the feature bit was
			// turned on, we'll update the channel's database
			// state.
			err := dbChan.MarkScidAliasNegotiated()
			if err != nil {
				return nil, err
			}
		}

		var chanOpts []lnwallet.ChannelOpt
		p.cfg.AuxLeafStore.WhenSome(func(s lnwallet.AuxLeafStore) {
			chanOpts = append(chanOpts, lnwallet.WithLeafStore(s))
		})
		p.cfg.AuxSigner.WhenSome(func(s lnwallet.AuxSigner) {
			chanOpts = append(chanOpts, lnwallet.WithAuxSigner(s))
		})
		p.cfg.AuxResolver.WhenSome(
			func(s lnwallet.AuxContractResolver) {
				chanOpts = append(
					chanOpts, lnwallet.WithAuxResolver(s),
				)
			},
		)

		lnChan, err := lnwallet.NewLightningChannel(
			p.cfg.Signer, dbChan, p.cfg.SigPool, chanOpts...,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to create channel "+
				"state machine: %w", err)
		}

		chanPoint := dbChan.FundingOutpoint

		chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

		p.log.Infof("Loading ChannelPoint(%v), isPending=%v",
			chanPoint, lnChan.IsPending())

		// Skip adding any permanently irreconcilable channels to the
		// htlcswitch.
		if !dbChan.HasChanStatus(channeldb.ChanStatusDefault) &&
			!dbChan.HasChanStatus(channeldb.ChanStatusRestored) {

			p.log.Warnf("ChannelPoint(%v) has status %v, won't "+
				"start.", chanPoint, dbChan.ChanStatus())

			// To help our peer recover from a potential data loss,
			// we resend our channel reestablish message if the
			// channel is in a borked state. We won't process any
			// channel reestablish message sent from the peer, but
			// that's okay since the assumption is that we did when
			// marking the channel borked.
			chanSync, err := dbChan.ChanSyncMsg()
			if err != nil {
				p.log.Errorf("Unable to create channel "+
					"reestablish message for channel %v: "+
					"%v", chanPoint, err)
				continue
			}

			msgs = append(msgs, chanSync)

			// Check if this channel needs to have the cooperative
			// close process restarted. If so, we'll need to send
			// the Shutdown message that is returned.
			if dbChan.HasChanStatus(
				channeldb.ChanStatusCoopBroadcasted,
			) {

				shutdownMsg, err := p.restartCoopClose(lnChan)
				if err != nil {
					p.log.Errorf("Unable to restart "+
						"coop close for channel: %v",
						err)
					continue
				}

				if shutdownMsg == nil {
					continue
				}

				// Append the message to the set of messages to
				// send.
				msgs = append(msgs, shutdownMsg)
			}

			continue
		}

		// Before we register this new link with the HTLC Switch, we'll
		// need to fetch its current link-layer forwarding policy from
		// the database.
		graph := p.cfg.ChannelGraph
		info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(
			&chanPoint,
		)
		if err != nil && !errors.Is(err, graphdb.ErrEdgeNotFound) {
			return nil, err
		}

		// We'll filter out our policy from the directional channel
		// edges based on whom the edge connects to. If it doesn't
		// connect to us, then we know that we were the one that
		// advertised the policy.
		//
		// TODO(roasbeef): can add helper method to get policy for
		// particular channel.
		var selfPolicy *models.ChannelEdgePolicy
		if info != nil && bytes.Equal(info.NodeKey1Bytes[:],
			p.cfg.ServerPubKey[:]) {

			selfPolicy = p1
		} else {
			selfPolicy = p2
		}

		// If we don't yet have an advertised routing policy, then
		// we'll use the current default, otherwise we'll translate the
		// routing policy into a forwarding policy.
		var forwardingPolicy *models.ForwardingPolicy
		if selfPolicy != nil {
			var inboundWireFee lnwire.Fee
			_, err := selfPolicy.ExtraOpaqueData.ExtractRecords(
				&inboundWireFee,
			)
			if err != nil {
				return nil, err
			}

			inboundFee := models.NewInboundFeeFromWire(
				inboundWireFee,
			)

			forwardingPolicy = &models.ForwardingPolicy{
				MinHTLCOut:    selfPolicy.MinHTLC,
				MaxHTLC:       selfPolicy.MaxHTLC,
				BaseFee:       selfPolicy.FeeBaseMSat,
				FeeRate:       selfPolicy.FeeProportionalMillionths,
				TimeLockDelta: uint32(selfPolicy.TimeLockDelta),

				InboundFee: inboundFee,
			}
		} else {
			p.log.Warnf("Unable to find our forwarding policy "+
				"for channel %v, using default values",
				chanPoint)
			forwardingPolicy = &p.cfg.RoutingPolicy
		}

		p.log.Tracef("Using link policy of: %v",
			spew.Sdump(forwardingPolicy))

		// If the channel is pending, set the value to nil in the
		// activeChannels map. This is done to signify that the channel
		// is pending. We don't add the link to the switch here - it's
		// the funding manager's responsibility to spin up pending
		// channels. Adding them here would just be extra work as we'll
		// tear them down when creating + adding the final link.
		if lnChan.IsPending() {
			p.activeChannels.Store(chanID, nil)

			continue
×
1227
                }
1228

1229
                shutdownInfo, err := lnChan.State().ShutdownInfo()
×
1230
                if err != nil && !errors.Is(err, channeldb.ErrNoShutdownInfo) {
×
1231
                        return nil, err
×
1232
                }
×
1233

1234
                isTaprootChan := lnChan.ChanType().IsTaproot()
×
1235

×
1236
                var (
×
1237
                        shutdownMsg     fn.Option[lnwire.Shutdown]
×
1238
                        shutdownInfoErr error
×
1239
                )
×
1240
                shutdownInfo.WhenSome(func(info channeldb.ShutdownInfo) {
×
1241
                        // If we can use the new RBF close feature, we don't
×
1242
                        // need to create the legacy closer. However for taproot
×
1243
                        // channels, we'll continue to use the legacy closer.
×
1244
                        if p.rbfCoopCloseAllowed() && !isTaprootChan {
×
1245
                                return
×
1246
                        }
×
1247

1248
                        // Compute an ideal fee.
1249
                        feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
×
1250
                                p.cfg.CoopCloseTargetConfs,
×
1251
                        )
×
1252
                        if err != nil {
×
1253
                                shutdownInfoErr = fmt.Errorf("unable to "+
×
1254
                                        "estimate fee: %w", err)
×
1255

×
1256
                                return
×
1257
                        }
×
1258

1259
                        addr, err := p.addrWithInternalKey(
×
1260
                                info.DeliveryScript.Val,
×
1261
                        )
×
1262
                        if err != nil {
×
1263
                                shutdownInfoErr = fmt.Errorf("unable to make "+
×
1264
                                        "delivery addr: %w", err)
×
1265
                                return
×
1266
                        }
×
1267
                        negotiateChanCloser, err := p.createChanCloser(
×
1268
                                lnChan, addr, feePerKw, nil,
×
1269
                                info.Closer(),
×
1270
                        )
×
1271
                        if err != nil {
×
1272
                                shutdownInfoErr = fmt.Errorf("unable to "+
×
1273
                                        "create chan closer: %w", err)
×
1274

×
1275
                                return
×
1276
                        }
×
1277

1278
                        chanID := lnwire.NewChanIDFromOutPoint(
×
1279
                                lnChan.State().FundingOutpoint,
×
1280
                        )
×
1281

×
1282
                        p.activeChanCloses.Store(chanID, makeNegotiateCloser(
×
1283
                                negotiateChanCloser,
×
1284
                        ))
×
1285

×
1286
                        // Create the Shutdown message.
×
1287
                        shutdown, err := negotiateChanCloser.ShutdownChan()
×
1288
                        if err != nil {
×
1289
                                p.activeChanCloses.Delete(chanID)
×
1290
                                shutdownInfoErr = err
×
1291

×
1292
                                return
×
1293
                        }
×
1294

1295
                        shutdownMsg = fn.Some(*shutdown)
×
1296
                })
1297
                if shutdownInfoErr != nil {
×
1298
                        return nil, shutdownInfoErr
×
1299
                }
×
1300

1301
                // Subscribe to the set of on-chain events for this channel.
1302
                chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(
×
1303
                        chanPoint,
×
1304
                )
×
1305
                if err != nil {
×
1306
                        return nil, err
×
1307
                }
×
1308

1309
                err = p.addLink(
×
1310
                        &chanPoint, lnChan, forwardingPolicy, chainEvents,
×
1311
                        true, shutdownMsg,
×
1312
                )
×
1313
                if err != nil {
×
1314
                        return nil, fmt.Errorf("unable to add link %v to "+
×
1315
                                "switch: %v", chanPoint, err)
×
1316
                }
×
1317

1318
                p.activeChannels.Store(chanID, lnChan)
×
1319

×
1320
                // We're using the old co-op close, so we don't need to init
×
1321
                // the new RBF chan closer. If we have a taproot chan, then
×
1322
                // we'll also use the legacy type, so we don't need to make the
×
1323
                // new closer.
×
1324
                if !p.rbfCoopCloseAllowed() || isTaprootChan {
×
1325
                        continue
×
1326
                }
1327

1328
                // Now that the link has been added above, we'll also init an
1329
                // RBF chan closer for this channel, but only if the new close
1330
                // feature is negotiated.
1331
                //
1332
                // Creating this here ensures that any shutdown messages sent
1333
                // will be automatically routed by the msg router.
1334
                if _, err := p.initRbfChanCloser(lnChan); err != nil {
×
1335
                        p.activeChanCloses.Delete(chanID)
×
1336

×
1337
                        return nil, fmt.Errorf("unable to init RBF chan "+
×
1338
                                "closer during peer connect: %w", err)
×
1339
                }
×
1340

1341
                // If the shutdown info isn't blank, then we should kick things
1342
                // off by sending a shutdown message to the remote party to
1343
                // continue the old shutdown flow.
1344
                restartShutdown := func(s channeldb.ShutdownInfo) error {
×
1345
                        return p.startRbfChanCloser(
×
1346
                                newRestartShutdownInit(s),
×
1347
                                lnChan.ChannelPoint(),
×
1348
                        )
×
1349
                }
×
1350
                err = fn.MapOptionZ(shutdownInfo, restartShutdown)
×
1351
                if err != nil {
×
1352
                        return nil, fmt.Errorf("unable to start RBF "+
×
1353
                                "chan closer: %w", err)
×
1354
                }
×
1355
        }
1356

1357
        return msgs, nil
3✔
1358
}
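
// The closure handed to shutdownInfo.WhenSome above cannot return an
// error directly, so loadActiveChannels records any failure in
// shutdownInfoErr and checks it once the closure has run. Below is a
// minimal, self-contained sketch of that same capture pattern using
// lnd's fn package; examplePrepareShutdown and its string payload are
// hypothetical and exist only for illustration.
func examplePrepareShutdown(script fn.Option[string]) (string, error) {
	var (
		msg      string
		innerErr error
	)
	script.WhenSome(func(s string) {
		// Failures are recorded rather than returned, since the
		// callback has no error return value.
		if s == "" {
			innerErr = fmt.Errorf("empty delivery script")
			return
		}

		msg = "shutdown to " + s
	})
	if innerErr != nil {
		return "", innerErr
	}

	return msg, nil
}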

// addLink creates and adds a new ChannelLink from the specified channel.
func (p *Brontide) addLink(chanPoint *wire.OutPoint,
	lnChan *lnwallet.LightningChannel,
	forwardingPolicy *models.ForwardingPolicy,
	chainEvents *contractcourt.ChainEventSubscription,
	syncStates bool, shutdownMsg fn.Option[lnwire.Shutdown]) error {

	// onChannelFailure will be called by the link in case the channel
	// fails for some reason.
	onChannelFailure := func(chanID lnwire.ChannelID,
		shortChanID lnwire.ShortChannelID,
		linkErr htlcswitch.LinkFailureError) {

		failure := linkFailureReport{
			chanPoint:   *chanPoint,
			chanID:      chanID,
			shortChanID: shortChanID,
			linkErr:     linkErr,
		}

		select {
		case p.linkFailures <- failure:
		case <-p.cg.Done():
		case <-p.cfg.Quit:
		}
	}

	updateContractSignals := func(signals *contractcourt.ContractSignals) error {
		return p.cfg.ChainArb.UpdateContractSignals(*chanPoint, signals)
	}

	notifyContractUpdate := func(update *contractcourt.ContractUpdate) error {
		return p.cfg.ChainArb.NotifyContractUpdate(*chanPoint, update)
	}

	//nolint:ll
	linkCfg := htlcswitch.ChannelLinkConfig{
		Peer:                   p,
		DecodeHopIterators:     p.cfg.Sphinx.DecodeHopIterators,
		ExtractErrorEncrypter:  p.cfg.Sphinx.ExtractErrorEncrypter,
		FetchLastChannelUpdate: p.cfg.FetchLastChanUpdate,
		HodlMask:               p.cfg.Hodl.Mask(),
		Registry:               p.cfg.Invoices,
		BestHeight:             p.cfg.Switch.BestHeight,
		Circuits:               p.cfg.Switch.CircuitModifier(),
		ForwardPackets:         p.cfg.InterceptSwitch.ForwardPackets,
		FwrdingPolicy:          *forwardingPolicy,
		FeeEstimator:           p.cfg.FeeEstimator,
		PreimageCache:          p.cfg.WitnessBeacon,
		ChainEvents:            chainEvents,
		UpdateContractSignals:  updateContractSignals,
		NotifyContractUpdate:   notifyContractUpdate,
		OnChannelFailure:       onChannelFailure,
		SyncStates:             syncStates,
		BatchTicker:            ticker.New(p.cfg.ChannelCommitInterval),
		FwdPkgGCTicker:         ticker.New(time.Hour),
		PendingCommitTicker: ticker.New(
			p.cfg.PendingCommitInterval,
		),
		BatchSize:               p.cfg.ChannelCommitBatchSize,
		UnsafeReplay:            p.cfg.UnsafeReplay,
		MinUpdateTimeout:        htlcswitch.DefaultMinLinkFeeUpdateTimeout,
		MaxUpdateTimeout:        htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
		OutgoingCltvRejectDelta: p.cfg.OutgoingCltvRejectDelta,
		TowerClient:             p.cfg.TowerClient,
		MaxOutgoingCltvExpiry:   p.cfg.MaxOutgoingCltvExpiry,
		MaxFeeAllocation:        p.cfg.MaxChannelFeeAllocation,
		MaxAnchorsCommitFeeRate: p.cfg.MaxAnchorsCommitFeeRate,
		NotifyActiveLink:        p.cfg.ChannelNotifier.NotifyActiveLinkEvent,
		NotifyActiveChannel:     p.cfg.ChannelNotifier.NotifyActiveChannelEvent,
		NotifyInactiveChannel:   p.cfg.ChannelNotifier.NotifyInactiveChannelEvent,
		NotifyInactiveLinkEvent: p.cfg.ChannelNotifier.NotifyInactiveLinkEvent,
		HtlcNotifier:            p.cfg.HtlcNotifier,
		GetAliases:              p.cfg.GetAliases,
		PreviouslySentShutdown:  shutdownMsg,
		DisallowRouteBlinding:   p.cfg.DisallowRouteBlinding,
		MaxFeeExposure:          p.cfg.MaxFeeExposure,
		ShouldFwdExpEndorsement: p.cfg.ShouldFwdExpEndorsement,
		DisallowQuiescence: p.cfg.DisallowQuiescence ||
			!p.remoteFeatures.HasFeature(lnwire.QuiescenceOptional),
		AuxTrafficShaper: p.cfg.AuxTrafficShaper,
	}

	// Before adding our new link, purge the switch of any pending or live
	// links going by the same channel id. If one is found, we'll shut it
	// down to ensure that the mailboxes are only ever under the control of
	// one link.
	chanID := lnwire.NewChanIDFromOutPoint(*chanPoint)
	p.cfg.Switch.RemoveLink(chanID)

	// With the channel link created, we'll now notify the htlc switch so
	// this channel can be used to dispatch local payments and also
	// passively forward payments.
	return p.cfg.Switch.CreateAndAddLink(linkCfg, lnChan)
}
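
// The onChannelFailure callback defined in addLink must never block peer
// shutdown, so the send into p.linkFailures races against the quit
// channels inside a select. The same pattern in isolation; reportFailure
// and its parameters are hypothetical stand-ins, not part of lnd's API.
func reportFailure(failures chan<- error, quit <-chan struct{}, err error) {
	select {
	case failures <- err:
		// Delivered to the consumer.
	case <-quit:
		// Shutting down: drop the report instead of blocking
		// forever on a channel nobody will ever read.
	}
}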

// maybeSendNodeAnn sends our node announcement to the remote peer if at least
// one confirmed public channel exists with them.
func (p *Brontide) maybeSendNodeAnn(channels []*channeldb.OpenChannel) {
	defer p.cg.WgDone()

	hasConfirmedPublicChan := false
	for _, channel := range channels {
		if channel.IsPending {
			continue
		}
		if channel.ChannelFlags&lnwire.FFAnnounceChannel == 0 {
			continue
		}

		hasConfirmedPublicChan = true
		break
	}
	if !hasConfirmedPublicChan {
		return
	}

	ourNodeAnn, err := p.cfg.GenNodeAnnouncement()
	if err != nil {
		p.log.Debugf("Unable to retrieve node announcement: %v", err)
		return
	}

	if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil {
		p.log.Debugf("Unable to resend node announcement: %v", err)
	}
}
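
// maybeSendNodeAnn treats a channel as public when the announce bit of
// its channel flags is set. A sketch of that bitmask test using the real
// lnwire.FFAnnounceChannel flag; the isAnnounced helper itself is
// hypothetical.
func isAnnounced(flags lnwire.FundingFlag) bool {
	// The flag is a single bit, so ANDing against the mask is non-zero
	// exactly when the channel was opened as announced.
	return flags&lnwire.FFAnnounceChannel != 0
}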

// maybeSendChannelUpdates sends our channel updates to the remote peer if we
// have any active channels with them.
func (p *Brontide) maybeSendChannelUpdates() {
	defer p.cg.WgDone()

	// If we don't have any active channels, then we can exit early.
	if p.activeChannels.Len() == 0 {
		return
	}

	maybeSendUpd := func(cid lnwire.ChannelID,
		lnChan *lnwallet.LightningChannel) error {

		// Nil channels are pending, so we'll skip them.
		if lnChan == nil {
			return nil
		}

		dbChan := lnChan.State()
		scid := func() lnwire.ShortChannelID {
			switch {
			// If it's a zero conf channel and confirmed, then we
			// need to use the "real" scid.
			case dbChan.IsZeroConf() && dbChan.ZeroConfConfirmed():
				return dbChan.ZeroConfRealScid()

			// Otherwise, we can use the normal scid.
			default:
				return dbChan.ShortChanID()
			}
		}()

		// Now that we know the channel is in a good state, we'll try
		// to fetch the update to send to the remote peer. If the
		// channel is pending, and not a zero conf channel, we'll get
		// an error here which we'll ignore.
		chanUpd, err := p.cfg.FetchLastChanUpdate(scid)
		if err != nil {
			p.log.Debugf("Unable to fetch channel update for "+
				"ChannelPoint(%v), scid=%v: %v",
				dbChan.FundingOutpoint, scid, err)

			return nil
		}

		p.log.Debugf("Sending channel update for ChannelPoint(%v), "+
			"scid=%v", dbChan.FundingOutpoint, scid)

		// We'll send it as a normal message instead of using the lazy
		// queue to prioritize transmission of the fresh update.
		if err := p.SendMessage(false, chanUpd); err != nil {
			err := fmt.Errorf("unable to send channel update for "+
				"ChannelPoint(%v), scid=%v: %w",
				dbChan.FundingOutpoint, scid, err)
			p.log.Errorf(err.Error())

			return err
		}

		return nil
	}

	p.activeChannels.ForEach(maybeSendUpd)
}

// WaitForDisconnect waits until the peer has disconnected. A peer may be
// disconnected if the local or remote side terminates the connection, or an
// irrecoverable protocol error has been encountered. This method will only
// begin watching the peer's waitgroup after the ready channel or the peer's
// quit channel are signaled. The ready channel should only be signaled if a
// call to Start returns no error. Otherwise, if the peer fails to start,
// calling Disconnect will signal the quit channel and the method will not
// block, since no goroutines were spawned.
func (p *Brontide) WaitForDisconnect(ready chan struct{}) {
	// Before we try to call the `Wait` goroutine, we'll make sure the main
	// set of goroutines are already active.
	select {
	case <-p.startReady:
	case <-p.cg.Done():
		return
	}

	select {
	case <-ready:
	case <-p.cg.Done():
	}

	p.cg.WgWait()
}

// Disconnect terminates the connection with the remote peer. Additionally, a
// signal is sent to the server and htlcSwitch indicating the resources
// allocated to the peer can now be cleaned up.
func (p *Brontide) Disconnect(reason error) {
	if !atomic.CompareAndSwapInt32(&p.disconnect, 0, 1) {
		return
	}

	// Make sure initialization has completed before we try to tear things
	// down.
	//
	// NOTE: We only read the `startReady` chan if the peer has been
	// started, otherwise we will skip reading it as this chan won't be
	// closed, hence it would block forever.
	if atomic.LoadInt32(&p.started) == 1 {
		p.log.Debugf("Started, waiting on startReady signal")

		select {
		case <-p.startReady:
		case <-p.cg.Done():
			return
		}
	}

	err := fmt.Errorf("disconnecting %s, reason: %v", p, reason)
	p.storeError(err)

	p.log.Infof(err.Error())

	// Stop PingManager before closing TCP connection.
	p.pingManager.Stop()

	// Ensure that the TCP connection is properly closed before continuing.
	p.cfg.Conn.Close()

	p.cg.Quit()

	// If our msg router isn't global (local to this instance), then we'll
	// stop it. Otherwise, we'll leave it running.
	if !p.globalMsgRouter {
		p.msgRouter.WhenSome(func(router msgmux.Router) {
			router.Stop()
		})
	}
}
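
// Disconnect is idempotent thanks to the CompareAndSwap above: only the
// first caller flips p.disconnect from 0 to 1 and runs the teardown. A
// minimal sketch of the idiom with a hypothetical onceCloser type:
type onceCloser struct {
	closed int32 // Accessed atomically.
}

func (c *onceCloser) close(teardown func()) {
	// Only one caller wins the swap; all later calls return early.
	if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
		return
	}

	teardown()
}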

// String returns the string representation of this peer.
func (p *Brontide) String() string {
	return fmt.Sprintf("%x@%s", p.cfg.PubKeyBytes, p.cfg.Conn.RemoteAddr())
}

// readNextMessage reads and returns the next message on the wire, along with
// any additional raw payload.
func (p *Brontide) readNextMessage() (lnwire.Message, error) {
	noiseConn := p.cfg.Conn
	err := noiseConn.SetReadDeadline(time.Time{})
	if err != nil {
		return nil, err
	}

	pktLen, err := noiseConn.ReadNextHeader()
	if err != nil {
		return nil, fmt.Errorf("read next header: %w", err)
	}

	// First we'll read the next _full_ message. We do this rather than
	// reading incrementally from the stream as the Lightning wire protocol
	// is message oriented and allows nodes to pad on additional data to
	// the message stream.
	var (
		nextMsg lnwire.Message
		msgLen  uint64
	)
	err = p.cfg.ReadPool.Submit(func(buf *buffer.Read) error {
		// Before reading the body of the message, set the read timeout
		// accordingly to ensure we don't block other readers using the
		// pool. We do so only after the task has been scheduled to
		// ensure the deadline doesn't expire while the message is in
		// the process of being scheduled.
		readDeadline := time.Now().Add(
			p.scaleTimeout(readMessageTimeout),
		)
		readErr := noiseConn.SetReadDeadline(readDeadline)
		if readErr != nil {
			return readErr
		}

		// The ReadNextBody method will actually end up re-using the
		// buffer, so within this closure, we can continue to use
		// rawMsg as it's just a slice into the buf from the buffer
		// pool.
		rawMsg, readErr := noiseConn.ReadNextBody(buf[:pktLen])
		if readErr != nil {
			return fmt.Errorf("read next body: %w", readErr)
		}
		msgLen = uint64(len(rawMsg))

		// Next, create a new io.Reader implementation from the raw
		// message, and use this to decode the message directly from
		// it.
		msgReader := bytes.NewReader(rawMsg)
		nextMsg, err = lnwire.ReadMessage(msgReader, 0)
		if err != nil {
			return err
		}

		// At this point, rawMsg and buf will be returned back to the
		// buffer pool for re-use.
		return nil
	})
	atomic.AddUint64(&p.bytesReceived, msgLen)
	if err != nil {
		return nil, err
	}

	p.logWireMessage(nextMsg, true)

	return nextMsg, nil
}
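
// readNextMessage clears the read deadline while waiting for the next
// header, then arms a fresh deadline before reading the body so a stalled
// peer can't pin a read-pool buffer indefinitely. A simplified sketch of
// that deadline dance over a plain net.Conn; readBody is hypothetical and
// ignores the noise-protocol framing used above.
func readBody(conn net.Conn, pktLen uint32,
	timeout time.Duration) ([]byte, error) {

	// Arm the deadline only once we're committed to reading a body.
	if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
		return nil, err
	}

	buf := make([]byte, pktLen)
	for n := 0; n < len(buf); {
		m, err := conn.Read(buf[n:])
		if err != nil {
			return nil, fmt.Errorf("read next body: %w", err)
		}
		n += m
	}

	return buf, nil
}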

// msgStream implements a goroutine-safe, in-order stream of messages to be
// delivered via closure to a receiver. These messages MUST be in order due to
// the nature of the lightning channel commitment and gossiper state machines.
// TODO(conner): use stream handler interface to abstract out stream
// state/logging.
type msgStream struct {
	streamShutdown int32 // To be used atomically.

	peer *Brontide

	apply func(lnwire.Message)

	startMsg string
	stopMsg  string

	msgCond *sync.Cond
	msgs    []lnwire.Message

	mtx sync.Mutex

	producerSema chan struct{}

	wg   sync.WaitGroup
	quit chan struct{}
}

// newMsgStream creates a new instance of a chanMsgStream for a particular
// channel identified by its channel ID. bufSize is the max number of messages
// that should be buffered in the internal queue. Callers should set this to a
// sane value that avoids blocking unnecessarily, but doesn't allow an
// unbounded amount of memory to be allocated to buffer incoming messages.
func newMsgStream(p *Brontide, startMsg, stopMsg string, bufSize uint32,
	apply func(lnwire.Message)) *msgStream {

	stream := &msgStream{
		peer:         p,
		apply:        apply,
		startMsg:     startMsg,
		stopMsg:      stopMsg,
		producerSema: make(chan struct{}, bufSize),
		quit:         make(chan struct{}),
	}
	stream.msgCond = sync.NewCond(&stream.mtx)

	// Before we return the active stream, we'll populate the producer's
	// semaphore channel. We'll use this to ensure that the producer won't
	// attempt to allocate memory in the queue for an item until it has
	// sufficient extra space.
	for i := uint32(0); i < bufSize; i++ {
		stream.producerSema <- struct{}{}
	}

	return stream
}
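
// The producerSema populated above is a counting semaphore built from a
// buffered channel: every queued token represents one free slot in the
// message queue. The idiom in isolation; newSema and acquire are
// hypothetical helpers, not lnd APIs.
func newSema(slots int) chan struct{} {
	sema := make(chan struct{}, slots)

	// Pre-fill the channel so that exactly 'slots' acquisitions can
	// succeed before anyone has to release.
	for i := 0; i < slots; i++ {
		sema <- struct{}{}
	}

	return sema
}

func acquire(sema chan struct{}, quit <-chan struct{}) bool {
	select {
	case <-sema: // One slot claimed.
		return true
	case <-quit: // Shutting down; the caller must not enqueue.
		return false
	}
}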

// Start starts the chanMsgStream.
func (ms *msgStream) Start() {
	ms.wg.Add(1)
	go ms.msgConsumer()
}

// Stop stops the chanMsgStream.
func (ms *msgStream) Stop() {
	// TODO(roasbeef): signal too?

	close(ms.quit)

	// Now that we've closed the channel, we'll repeatedly signal the msg
	// consumer until we've detected that it has exited.
	for atomic.LoadInt32(&ms.streamShutdown) == 0 {
		ms.msgCond.Signal()
		time.Sleep(time.Millisecond * 100)
	}

	ms.wg.Wait()
}

// msgConsumer is the main goroutine that streams messages from the peer's
// readHandler directly to the target channel.
func (ms *msgStream) msgConsumer() {
	defer ms.wg.Done()
	defer peerLog.Tracef(ms.stopMsg)
	defer atomic.StoreInt32(&ms.streamShutdown, 1)

	peerLog.Tracef(ms.startMsg)

	for {
		// First, we'll check our condition. If the queue of messages
		// is empty, then we'll wait until a new item is added.
		ms.msgCond.L.Lock()
		for len(ms.msgs) == 0 {
			ms.msgCond.Wait()

			// If we woke up in order to exit, then we'll do so.
			// Otherwise, we'll check the message queue for any new
			// items.
			select {
			case <-ms.peer.cg.Done():
				ms.msgCond.L.Unlock()
				return
			case <-ms.quit:
				ms.msgCond.L.Unlock()
				return
			default:
			}
		}

		// Grab the message off the front of the queue, shifting the
		// slice's reference down one in order to remove the message
		// from the queue.
		msg := ms.msgs[0]
		ms.msgs[0] = nil // Set to nil to prevent GC leak.
		ms.msgs = ms.msgs[1:]

		ms.msgCond.L.Unlock()

		ms.apply(msg)

		// We've just successfully processed an item, so we'll signal
		// to the producer that a new slot is available in the buffer.
		// We'll use this to bound the size of the buffer to avoid
		// allowing it to grow indefinitely.
		select {
		case ms.producerSema <- struct{}{}:
		case <-ms.peer.cg.Done():
			return
		case <-ms.quit:
			return
		}
	}
}
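
// msgConsumer uses the standard sync.Cond discipline: the predicate is
// re-checked in a loop after every Wait, because wake-ups can be spurious
// or can race with the shutdown signal. A minimal sketch of that loop
// over a hypothetical int queue, assuming cond was built with
// sync.NewCond around the mutex guarding queue:
func popBlocking(cond *sync.Cond, queue *[]int) int {
	cond.L.Lock()
	defer cond.L.Unlock()

	// Wait atomically releases the lock while blocked and reacquires it
	// before returning, so the predicate is always checked under the
	// lock.
	for len(*queue) == 0 {
		cond.Wait()
	}

	item := (*queue)[0]
	*queue = (*queue)[1:]

	return item
}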

// AddMsg adds a new message to the msgStream. This function is safe for
// concurrent access.
func (ms *msgStream) AddMsg(msg lnwire.Message) {
	// First, we'll attempt to receive from the producerSema channel. This
	// acts as a semaphore to prevent us from indefinitely buffering
	// incoming items from the wire. Either the msg queue isn't full, and
	// we'll not block, or the queue is full, and we'll block until either
	// we're signalled to quit, or a slot is freed up.
	select {
	case <-ms.producerSema:
	case <-ms.peer.cg.Done():
		return
	case <-ms.quit:
		return
	}

	// Next, we'll lock the condition, and add the message to the end of
	// the message queue.
	ms.msgCond.L.Lock()
	ms.msgs = append(ms.msgs, msg)
	ms.msgCond.L.Unlock()

	// With the message added, we signal to the msgConsumer that there are
	// additional messages to consume.
	ms.msgCond.Signal()
}

// waitUntilLinkActive waits until the target link is active and returns a
// ChannelLink to pass messages to. It accomplishes this by subscribing to
// an ActiveLinkEvent which is emitted by the link when it first starts up.
func waitUntilLinkActive(p *Brontide,
	cid lnwire.ChannelID) htlcswitch.ChannelUpdateHandler {

	p.log.Tracef("Waiting for link=%v to be active", cid)

	// Subscribe to receive channel events.
	//
	// NOTE: If the link is already active by SubscribeChannelEvents, then
	// GetLink will retrieve the link and we can send messages. If the link
	// becomes active between SubscribeChannelEvents and GetLink, then GetLink
	// will retrieve the link. If the link becomes active after GetLink, then
	// we will get an ActiveLinkEvent notification and retrieve the link. If
	// the call to GetLink is before SubscribeChannelEvents, however, there
	// will be a race condition.
	sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents()
	if err != nil {
		// If we have a non-nil error, then the server is shutting down and we
		// can exit here and return nil. This means no message will be delivered
		// to the link.
		return nil
	}
	defer sub.Cancel()

	// The link may already be active by this point, and we may have missed the
	// ActiveLinkEvent. Check if the link exists.
	link := p.fetchLinkFromKeyAndCid(cid)
	if link != nil {
		return link
	}

	// If the link is nil, we must wait for it to be active.
	for {
		select {
		// A new event has been sent by the ChannelNotifier. We first check
		// whether the event is an ActiveLinkEvent. If it is, we'll check
		// that the event is for this channel. Otherwise, we discard the
		// message.
		case e := <-sub.Updates():
			event, ok := e.(channelnotifier.ActiveLinkEvent)
			if !ok {
				// Ignore this notification.
				continue
			}

			chanPoint := event.ChannelPoint

			// Check whether the retrieved chanPoint matches the target
			// channel id.
			if !cid.IsChanPoint(chanPoint) {
				continue
			}

			// The link shouldn't be nil as we received an
			// ActiveLinkEvent. If it is nil, we return nil and the
			// calling function should catch it.
			return p.fetchLinkFromKeyAndCid(cid)

		case <-p.cg.Done():
			return nil
		}
	}
}
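
// The subscribe-then-check ordering in waitUntilLinkActive is what closes
// the race described in the NOTE above: opening the subscription before
// the first lookup guarantees that an activation landing after the lookup
// is still observed as an event. In schematic form (subscribe and lookup
// are hypothetical):
//
//	sub := subscribe()        // 1. Open the event window first.
//	defer sub.Cancel()
//	if link := lookup(); link != nil {
//		return link       // 2. Already active, no event needed.
//	}
//	for ev := range sub.Updates() {
//		// 3. Any later activation must arrive here.
//	}
//
// Swapping steps 1 and 2 would let the link activate between the lookup
// and the subscription, silently losing the event.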

// newChanMsgStream is used to create a msgStream between the peer and a
// particular channel link in the htlcswitch. We utilize additional
// synchronization with the fundingManager to ensure we don't attempt to
// dispatch a message to a channel before it is fully active. A reference to the
// channel this stream forwards to is held in scope to prevent unnecessary
// lookups.
func newChanMsgStream(p *Brontide, cid lnwire.ChannelID) *msgStream {
	var chanLink htlcswitch.ChannelUpdateHandler

	apply := func(msg lnwire.Message) {
		// This check is fine because if the link no longer exists, it will
		// be removed from the activeChannels map and subsequent messages
		// shouldn't reach the chan msg stream.
		if chanLink == nil {
			chanLink = waitUntilLinkActive(p, cid)

			// If the link is still not active and the calling function
			// errored out, just return.
			if chanLink == nil {
				p.log.Warnf("Link=%v is not active", cid)
				return
			}
		}

		// In order to avoid unnecessarily delivering messages as the
		// peer is exiting, we'll check quickly to see if we need to
		// exit.
		select {
		case <-p.cg.Done():
			return
		default:
		}

		chanLink.HandleChannelUpdate(msg)
	}

	return newMsgStream(p,
		fmt.Sprintf("Update stream for ChannelID(%x) created", cid[:]),
		fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]),
		msgStreamSize,
		apply,
	)
}

// newDiscMsgStream is used to setup a msgStream between the peer and the
// authenticated gossiper. This stream should be used to forward all remote
// channel announcements.
func newDiscMsgStream(p *Brontide) *msgStream {
	apply := func(msg lnwire.Message) {
		// TODO(yy): `ProcessRemoteAnnouncement` returns an error chan
		// and we need to process it.
		p.cfg.AuthGossiper.ProcessRemoteAnnouncement(msg, p)
	}

	return newMsgStream(
		p,
		"Update stream for gossiper created",
		"Update stream for gossiper exited",
		msgStreamSize,
		apply,
	)
}

// readHandler is responsible for reading messages off the wire in series, then
// properly dispatching the handling of the message to the proper subsystem.
//
// NOTE: This method MUST be run as a goroutine.
func (p *Brontide) readHandler() {
	defer p.cg.WgDone()

	// We'll stop the timer after a new message is received, and also
	// reset it after we process the next message.
	idleTimer := time.AfterFunc(idleTimeout, func() {
		err := fmt.Errorf("peer %s no answer for %s -- disconnecting",
			p, idleTimeout)
		p.Disconnect(err)
	})

	// Initialize our negotiated gossip sync method before reading messages
	// off the wire. When using gossip queries, this ensures a gossip
	// syncer is active by the time query messages arrive.
	//
	// TODO(conner): have peer store gossip syncer directly and bypass
	// gossiper?
	p.initGossipSync()

	discStream := newDiscMsgStream(p)
	discStream.Start()
	defer discStream.Stop()
out:
	for atomic.LoadInt32(&p.disconnect) == 0 {
		nextMsg, err := p.readNextMessage()
		if !idleTimer.Stop() {
			select {
			case <-idleTimer.C:
			default:
			}
		}
		if err != nil {
			p.log.Infof("unable to read message from peer: %v", err)

			// If we could not read our peer's message due to an
			// unknown type or invalid alias, we continue processing
			// as normal. We store unknown message and address
			// types, as they may provide debugging insight.
			switch e := err.(type) {
			// If this is just a message we don't yet recognize,
			// we'll continue processing as normal as this allows
			// us to introduce new messages in a forwards
			// compatible manner.
			case *lnwire.UnknownMessage:
				p.storeError(e)
				idleTimer.Reset(idleTimeout)
				continue

			// If they sent us an address type that we don't yet
			// know of, then this isn't a wire error, so we'll
			// simply continue parsing the remainder of their
			// messages.
			case *lnwire.ErrUnknownAddrType:
				p.storeError(e)
				idleTimer.Reset(idleTimeout)
				continue

			// If the NodeAnnouncement has an invalid alias, then
			// we'll log that error above and continue so we can
			// continue to read messages from the peer. We do not
			// store this error because it is of little debugging
			// value.
			case *lnwire.ErrInvalidNodeAlias:
				idleTimer.Reset(idleTimeout)
				continue

			// If the error we encountered wasn't just a message we
			// didn't recognize, then we'll stop all processing as
			// this is a fatal error.
			default:
				break out
			}
		}

		// If a message router is active, then we'll try to have it
		// handle this message. If it can, then we're able to skip the
		// rest of the message handling logic.
		err = fn.MapOptionZ(p.msgRouter, func(r msgmux.Router) error {
			return r.RouteMsg(msgmux.PeerMsg{
				PeerPub: *p.IdentityKey(),
				Message: nextMsg,
			})
		})

		// No error occurred, and the message was handled by the
		// router.
		if err == nil {
			continue
		}

		var (
			targetChan   lnwire.ChannelID
			isLinkUpdate bool
		)

		switch msg := nextMsg.(type) {
		case *lnwire.Pong:
			// When we receive a Pong message in response to our
			// last ping message, we send it to the pingManager.
			p.pingManager.ReceivedPong(msg)

		case *lnwire.Ping:
			// First, we'll store their latest ping payload within
			// the relevant atomic variable.
			p.lastPingPayload.Store(msg.PaddingBytes[:])

			// Next, we'll send over the amount of specified pong
			// bytes.
			pong := lnwire.NewPong(p.cfg.PongBuf[0:msg.NumPongBytes])
			p.queueMsg(pong, nil)

		case *lnwire.OpenChannel,
			*lnwire.AcceptChannel,
			*lnwire.FundingCreated,
			*lnwire.FundingSigned,
			*lnwire.ChannelReady:

			p.cfg.FundingManager.ProcessFundingMsg(msg, p)

		case *lnwire.Shutdown:
			select {
			case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
			case <-p.cg.Done():
				break out
			}
		case *lnwire.ClosingSigned:
			select {
			case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
			case <-p.cg.Done():
				break out
			}

		case *lnwire.Warning:
			targetChan = msg.ChanID
			isLinkUpdate = p.handleWarningOrError(targetChan, msg)

		case *lnwire.Error:
			targetChan = msg.ChanID
			isLinkUpdate = p.handleWarningOrError(targetChan, msg)

		case *lnwire.ChannelReestablish:
			targetChan = msg.ChanID
			isLinkUpdate = p.hasChannel(targetChan)

			// If we failed to find the link in question, and the
			// message received was a channel sync message, then
			// this might be a peer trying to resync a closed
			// channel. In this case we'll try to resend our last
			// channel sync message, such that the peer can recover
			// funds from the closed channel.
			if !isLinkUpdate {
				err := p.resendChanSyncMsg(targetChan)
				if err != nil {
					// TODO(halseth): send error to peer?
					p.log.Errorf("resend failed: %v",
						err)
				}
			}

		// For messages that implement the LinkUpdater interface, we
		// will consider them as link updates and send them to
		// chanStream. These messages will be queued inside chanStream
		// if the channel is not active yet.
		case lnwire.LinkUpdater:
			targetChan = msg.TargetChanID()
			isLinkUpdate = p.hasChannel(targetChan)

			// Log an error if we don't have this channel. This
			// means the peer has sent us a message with an unknown
			// channel ID.
			if !isLinkUpdate {
				p.log.Errorf("Unknown channel ID: %v found "+
					"in received msg=%s", targetChan,
					nextMsg.MsgType())
			}

		case *lnwire.ChannelUpdate1,
			*lnwire.ChannelAnnouncement1,
			*lnwire.NodeAnnouncement,
			*lnwire.AnnounceSignatures1,
			*lnwire.GossipTimestampRange,
			*lnwire.QueryShortChanIDs,
			*lnwire.QueryChannelRange,
			*lnwire.ReplyChannelRange,
			*lnwire.ReplyShortChanIDsEnd:

			discStream.AddMsg(msg)

		case *lnwire.Custom:
			err := p.handleCustomMessage(msg)
			if err != nil {
				p.storeError(err)
				p.log.Errorf("%v", err)
			}

		default:
			// If the message we received is unknown to us, store
			// the type to track the failure.
			err := fmt.Errorf("unknown message type %v received",
				uint16(msg.MsgType()))
			p.storeError(err)

			p.log.Errorf("%v", err)
		}

		if isLinkUpdate {
			// If this is a channel update, then we need to feed it
			// into the channel's in-order message stream.
			p.sendLinkUpdateMsg(targetChan, nextMsg)
		}

		idleTimer.Reset(idleTimeout)
	}

	p.Disconnect(errors.New("read handler closed"))

	p.log.Trace("readHandler for peer done")
}
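
// readHandler resets the idle timer with the Stop-then-drain idiom: when
// Stop reports that the timer already fired, the stale tick is drained
// from the channel before Reset so it can't be mistaken for a fresh
// expiry. The idiom in isolation; resetTimer is a hypothetical helper.
func resetTimer(t *time.Timer, d time.Duration) {
	if !t.Stop() {
		// The timer already fired; consume the pending tick if it
		// hasn't been received yet.
		select {
		case <-t.C:
		default:
		}
	}

	t.Reset(d)
}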

// handleCustomMessage handles the given custom message if a handler is
// registered.
func (p *Brontide) handleCustomMessage(msg *lnwire.Custom) error {
	if p.cfg.HandleCustomMessage == nil {
		return fmt.Errorf("no custom message handler for "+
			"message type %v", uint16(msg.MsgType()))
	}

	return p.cfg.HandleCustomMessage(p.PubKey(), msg)
}

// isLoadedFromDisk returns true if the provided channel ID is loaded from
// disk.
//
// NOTE: only returns true for pending channels.
func (p *Brontide) isLoadedFromDisk(chanID lnwire.ChannelID) bool {
	// If this is a newly added channel, no need to reestablish.
	_, added := p.addedChannels.Load(chanID)
	if added {
		return false
	}

	// Return false if the channel is unknown.
	channel, ok := p.activeChannels.Load(chanID)
	if !ok {
		return false
	}

	// During startup, we will use a nil value to mark a pending channel
	// that's loaded from disk.
	return channel == nil
}

// isActiveChannel returns true if the provided channel id is active, otherwise
// returns false.
func (p *Brontide) isActiveChannel(chanID lnwire.ChannelID) bool {
	// The channel would be nil if,
	// - the channel doesn't exist, or,
	// - the channel exists, but is pending. In this case, we don't
	//   consider this channel active.
	channel, _ := p.activeChannels.Load(chanID)

	return channel != nil
}

// isPendingChannel returns true if the provided channel ID is pending, and
// returns false if the channel is active or unknown.
func (p *Brontide) isPendingChannel(chanID lnwire.ChannelID) bool {
	// Return false if the channel is unknown.
	channel, ok := p.activeChannels.Load(chanID)
	if !ok {
		return false
	}

	return channel == nil
}

// hasChannel returns true if the peer has a pending/active channel specified
// by the channel ID.
func (p *Brontide) hasChannel(chanID lnwire.ChannelID) bool {
	_, ok := p.activeChannels.Load(chanID)
	return ok
}

// storeError stores an error in our peer's buffer of recent errors with the
// current timestamp. Errors are only stored if we have at least one active
// channel with the peer to mitigate a dos vector where a peer costlessly
// connects to us and spams us with errors.
func (p *Brontide) storeError(err error) {
	var haveChannels bool

	p.activeChannels.Range(func(_ lnwire.ChannelID,
		channel *lnwallet.LightningChannel) bool {

		// Pending channels will be nil in the activeChannels map.
		if channel == nil {
			// Return true to continue the iteration.
			return true
		}

		haveChannels = true

		// Return false to break the iteration.
		return false
	})

	// If we do not have any active channels with the peer, we do not store
	// errors as a dos mitigation.
	if !haveChannels {
		p.log.Trace("no channels with peer, not storing err")
		return
	}

	p.cfg.ErrorBuffer.Add(
		&TimestampedError{Timestamp: time.Now(), Error: err},
	)
}
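
// storeError leans on the boolean returned from the Range callback to
// short-circuit: returning false ends the scan as soon as one active
// channel is found. The same pattern against a plain sync.Map;
// hasLiveEntry is a hypothetical helper.
func hasLiveEntry(m *sync.Map) bool {
	var found bool
	m.Range(func(_, v any) bool {
		if v == nil {
			return true // Keep scanning past placeholders.
		}

		found = true

		return false // Stop the iteration early.
	})

	return found
}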
2306

2307
// handleWarningOrError processes a warning or error msg and returns true if
2308
// msg should be forwarded to the associated channel link. False is returned if
2309
// any necessary forwarding of msg was already handled by this method. If msg is
2310
// an error from a peer with an active channel, we'll store it in memory.
2311
//
2312
// NOTE: This method should only be called from within the readHandler.
2313
func (p *Brontide) handleWarningOrError(chanID lnwire.ChannelID,
2314
        msg lnwire.Message) bool {
×
2315

×
2316
        if errMsg, ok := msg.(*lnwire.Error); ok {
×
2317
                p.storeError(errMsg)
×
2318
        }
×
2319

2320
        switch {
×
2321
        // Connection wide messages should be forwarded to all channel links
2322
        // with this peer.
2323
        case chanID == lnwire.ConnectionWideID:
×
2324
                for _, chanStream := range p.activeMsgStreams {
×
2325
                        chanStream.AddMsg(msg)
×
2326
                }
×
2327

2328
                return false
×
2329

2330
        // If the channel ID for the message corresponds to a pending channel,
2331
        // then the funding manager will handle it.
2332
        case p.cfg.FundingManager.IsPendingChannel(chanID, p):
×
2333
                p.cfg.FundingManager.ProcessFundingMsg(msg, p)
×
2334
                return false
×
2335

2336
        // If not we hand the message to the channel link for this channel.
2337
        case p.isActiveChannel(chanID):
×
2338
                return true
×
2339

2340
        default:
×
2341
                return false
×
2342
        }
2343
}

// messageSummary returns a human-readable string that summarizes an
// incoming/outgoing message. Not all messages will have a summary, only those
// which have additional data that can be informative at a glance.
func messageSummary(msg lnwire.Message) string {
        switch msg := msg.(type) {
        case *lnwire.Init:
                // No summary.
                return ""

        case *lnwire.OpenChannel:
                return fmt.Sprintf("temp_chan_id=%x, chain=%v, csv=%v, amt=%v, "+
                        "push_amt=%v, reserve=%v, flags=%v",
                        msg.PendingChannelID[:], msg.ChainHash,
                        msg.CsvDelay, msg.FundingAmount, msg.PushAmount,
                        msg.ChannelReserve, msg.ChannelFlags)

        case *lnwire.AcceptChannel:
                return fmt.Sprintf("temp_chan_id=%x, reserve=%v, csv=%v, num_confs=%v",
                        msg.PendingChannelID[:], msg.ChannelReserve, msg.CsvDelay,
                        msg.MinAcceptDepth)

        case *lnwire.FundingCreated:
                return fmt.Sprintf("temp_chan_id=%x, chan_point=%v",
                        msg.PendingChannelID[:], msg.FundingPoint)

        case *lnwire.FundingSigned:
                return fmt.Sprintf("chan_id=%v", msg.ChanID)

        case *lnwire.ChannelReady:
                return fmt.Sprintf("chan_id=%v, next_point=%x",
                        msg.ChanID, msg.NextPerCommitmentPoint.SerializeCompressed())

        case *lnwire.Shutdown:
                return fmt.Sprintf("chan_id=%v, script=%x", msg.ChannelID,
                        msg.Address[:])

        case *lnwire.ClosingComplete:
                return fmt.Sprintf("chan_id=%v, fee_sat=%v, locktime=%v",
                        msg.ChannelID, msg.FeeSatoshis, msg.LockTime)

        case *lnwire.ClosingSig:
                return fmt.Sprintf("chan_id=%v", msg.ChannelID)

        case *lnwire.ClosingSigned:
                return fmt.Sprintf("chan_id=%v, fee_sat=%v", msg.ChannelID,
                        msg.FeeSatoshis)

        case *lnwire.UpdateAddHTLC:
                var blindingPoint []byte
                msg.BlindingPoint.WhenSome(
                        func(b tlv.RecordT[lnwire.BlindingPointTlvType,
                                *btcec.PublicKey]) {

                                blindingPoint = b.Val.SerializeCompressed()
                        },
                )

                return fmt.Sprintf("chan_id=%v, id=%v, amt=%v, expiry=%v, "+
                        "hash=%x, blinding_point=%x, custom_records=%v",
                        msg.ChanID, msg.ID, msg.Amount, msg.Expiry,
                        msg.PaymentHash[:], blindingPoint, msg.CustomRecords)

        case *lnwire.UpdateFailHTLC:
                return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID,
                        msg.ID, msg.Reason)

        case *lnwire.UpdateFulfillHTLC:
                return fmt.Sprintf("chan_id=%v, id=%v, preimage=%x, "+
                        "custom_records=%v", msg.ChanID, msg.ID,
                        msg.PaymentPreimage[:], msg.CustomRecords)

        case *lnwire.CommitSig:
                return fmt.Sprintf("chan_id=%v, num_htlcs=%v", msg.ChanID,
                        len(msg.HtlcSigs))

        case *lnwire.RevokeAndAck:
                return fmt.Sprintf("chan_id=%v, rev=%x, next_point=%x",
                        msg.ChanID, msg.Revocation[:],
                        msg.NextRevocationKey.SerializeCompressed())

        case *lnwire.UpdateFailMalformedHTLC:
                return fmt.Sprintf("chan_id=%v, id=%v, fail_code=%v",
                        msg.ChanID, msg.ID, msg.FailureCode)

        case *lnwire.Warning:
                return fmt.Sprintf("%v", msg.Warning())

        case *lnwire.Error:
                return fmt.Sprintf("%v", msg.Error())

        case *lnwire.AnnounceSignatures1:
                return fmt.Sprintf("chan_id=%v, short_chan_id=%v", msg.ChannelID,
                        msg.ShortChannelID.ToUint64())

        case *lnwire.ChannelAnnouncement1:
                return fmt.Sprintf("chain_hash=%v, short_chan_id=%v",
                        msg.ChainHash, msg.ShortChannelID.ToUint64())

        case *lnwire.ChannelUpdate1:
                return fmt.Sprintf("chain_hash=%v, short_chan_id=%v, "+
                        "mflags=%v, cflags=%v, update_time=%v", msg.ChainHash,
                        msg.ShortChannelID.ToUint64(), msg.MessageFlags,
                        msg.ChannelFlags, time.Unix(int64(msg.Timestamp), 0))

        case *lnwire.NodeAnnouncement:
                return fmt.Sprintf("node=%x, update_time=%v",
                        msg.NodeID, time.Unix(int64(msg.Timestamp), 0))

        case *lnwire.Ping:
                return fmt.Sprintf("ping_bytes=%x", msg.PaddingBytes[:])

        case *lnwire.Pong:
                return fmt.Sprintf("len(pong_bytes)=%d", len(msg.PongBytes[:]))

        case *lnwire.UpdateFee:
                return fmt.Sprintf("chan_id=%v, fee_update_sat=%v",
                        msg.ChanID, int64(msg.FeePerKw))

        case *lnwire.ChannelReestablish:
                return fmt.Sprintf("chan_id=%v, next_local_height=%v, "+
                        "remote_tail_height=%v", msg.ChanID,
                        msg.NextLocalCommitHeight, msg.RemoteCommitTailHeight)

        case *lnwire.ReplyShortChanIDsEnd:
                return fmt.Sprintf("chain_hash=%v, complete=%v", msg.ChainHash,
                        msg.Complete)

        case *lnwire.ReplyChannelRange:
                return fmt.Sprintf("start_height=%v, end_height=%v, "+
                        "num_chans=%v, encoding=%v", msg.FirstBlockHeight,
                        msg.LastBlockHeight(), len(msg.ShortChanIDs),
                        msg.EncodingType)

        case *lnwire.QueryShortChanIDs:
                return fmt.Sprintf("chain_hash=%v, encoding=%v, num_chans=%v",
                        msg.ChainHash, msg.EncodingType, len(msg.ShortChanIDs))

        case *lnwire.QueryChannelRange:
                return fmt.Sprintf("chain_hash=%v, start_height=%v, "+
                        "end_height=%v", msg.ChainHash, msg.FirstBlockHeight,
                        msg.LastBlockHeight())

        case *lnwire.GossipTimestampRange:
                return fmt.Sprintf("chain_hash=%v, first_stamp=%v, "+
                        "stamp_range=%v", msg.ChainHash,
                        time.Unix(int64(msg.FirstTimestamp), 0),
                        msg.TimestampRange)

        case *lnwire.Stfu:
                return fmt.Sprintf("chan_id=%v, initiator=%v", msg.ChanID,
                        msg.Initiator)

        case *lnwire.Custom:
                return fmt.Sprintf("type=%d", msg.Type)
        }

        return fmt.Sprintf("unknown msg type=%T", msg)
}
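
// As a concrete example of the summaries above (illustrative, invented
// values), an UpdateFulfillHTLC might render as:
//
//        chan_id=ab01...ef, id=7, preimage=9f32..., custom_records=map[]
//
// while a message with no case in the switch falls through to
// "unknown msg type=*lnwire.SomeNewMessage".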

// logWireMessage logs the receipt or sending of a particular wire message.
// This function is used rather than just logging the message in order to
// produce less spammy log messages in trace mode by setting the 'Curve'
// parameter to nil. Doing this avoids printing out each of the field elements
// in the curve parameters for secp256k1.
func (p *Brontide) logWireMessage(msg lnwire.Message, read bool) {
        summaryPrefix := "Received"
        if !read {
                summaryPrefix = "Sending"
        }

        p.log.Debugf("%v", lnutils.NewLogClosure(func() string {
                // Debug summary of message.
                summary := messageSummary(msg)
                if len(summary) > 0 {
                        summary = "(" + summary + ")"
                }

                preposition := "to"
                if read {
                        preposition = "from"
                }

                var msgType string
                if msg.MsgType() < lnwire.CustomTypeStart {
                        msgType = msg.MsgType().String()
                } else {
                        msgType = "custom"
                }

                return fmt.Sprintf("%v %v%s %v %s", summaryPrefix,
                        msgType, summary, preposition, p)
        }))

        prefix := "readMessage from peer"
        if !read {
                prefix = "writeMessage to peer"
        }

        p.log.Tracef(prefix+": %v", lnutils.SpewLogClosure(msg))
}
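
// Putting the pieces together, the debug line assembled by the closure
// above has the shape "<prefix> <type>(<summary>) <to|from> <peer>". For
// example (illustrative output only), a received ping might log as:
//
//        Received Ping(ping_bytes=00) from 02ab...@127.0.0.1:9735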

// writeMessage writes and flushes the target lnwire.Message to the remote peer.
// If the passed message is nil, this method will only try to flush an existing
// message buffered on the connection. It is safe to call this method again
// with a nil message iff a timeout error is returned. This will continue to
// flush the pending message to the wire.
//
// NOTE:
// Besides its usage in Start, this function should not be used elsewhere
// except in writeHandler. If multiple goroutines call writeMessage at the same
// time, panics can occur because WriteMessage and Flush don't use any locking
// internally.
func (p *Brontide) writeMessage(msg lnwire.Message) error {
        // Only log the message on the first attempt.
        if msg != nil {
                p.logWireMessage(msg, false)
        }

        noiseConn := p.cfg.Conn

        flushMsg := func() error {
                // Ensure the write deadline is set before we attempt to send
                // the message.
                writeDeadline := time.Now().Add(
                        p.scaleTimeout(writeMessageTimeout),
                )
                err := noiseConn.SetWriteDeadline(writeDeadline)
                if err != nil {
                        return err
                }

                // Flush the pending message to the wire. If an error is
                // encountered, e.g. write timeout, the number of bytes written
                // so far will be returned.
                n, err := noiseConn.Flush()

                // Record the number of bytes written on the wire, if any.
                if n > 0 {
                        atomic.AddUint64(&p.bytesSent, uint64(n))
                }

                return err
        }

        // If the current message has already been serialized, encrypted, and
        // buffered on the underlying connection we will skip straight to
        // flushing it to the wire.
        if msg == nil {
                return flushMsg()
        }

        // Otherwise, this is a new message. We'll acquire a write buffer to
        // serialize the message and buffer the ciphertext on the connection.
        err := p.cfg.WritePool.Submit(func(buf *bytes.Buffer) error {
                // Using a buffer allocated by the write pool, encode the
                // message directly into the buffer.
                _, writeErr := lnwire.WriteMessage(buf, msg, 0)
                if writeErr != nil {
                        return writeErr
                }

                // Finally, write the message itself in a single swoop. This
                // will buffer the ciphertext on the underlying connection. We
                // will defer flushing the message until the write pool has been
                // released.
                return noiseConn.WriteMessage(buf.Bytes())
        })
        if err != nil {
                return err
        }

        return flushMsg()
}
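
// A minimal sketch of the retry contract documented above, mirroring how
// writeHandler drives this method: only the first attempt passes msg, and
// timeout retries pass nil so the already-buffered ciphertext is flushed
// without being re-serialized or re-encrypted:
//
//        err := p.writeMessage(msg)
//        for {
//                nerr, ok := err.(net.Error)
//                if !ok || !nerr.Timeout() {
//                        break
//                }
//                err = p.writeMessage(nil) // flush-only pass
//        }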

// writeHandler is a goroutine dedicated to reading messages off of an incoming
// queue, and writing them out to the wire. This goroutine coordinates with the
// queueHandler in order to ensure the incoming message queue is quickly
// drained.
//
// NOTE: This method MUST be run as a goroutine.
func (p *Brontide) writeHandler() {
        // We'll stop the timer after a new message is sent, and also reset it
        // after we process the next message.
        idleTimer := time.AfterFunc(idleTimeout, func() {
                err := fmt.Errorf("peer %s no write for %s -- disconnecting",
                        p, idleTimeout)
                p.Disconnect(err)
        })

        var exitErr error

out:
        for {
                select {
                case outMsg := <-p.sendQueue:
                        // Record the time at which we first attempt to send the
                        // message.
                        startTime := time.Now()

                retry:
                        // Write out the message to the socket. If a timeout
                        // error is encountered, we will catch this and retry
                        // after backing off in case the remote peer is just
                        // slow to process messages from the wire.
                        err := p.writeMessage(outMsg.msg)
                        if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
                                p.log.Debugf("Write timeout detected for "+
                                        "peer, first write for message "+
                                        "attempted %v ago",
                                        time.Since(startTime))

                                // If we received a timeout error, this implies
                                // that the message was buffered on the
                                // connection successfully and that a flush was
                                // attempted. We'll set the message to nil so
                                // that on a subsequent pass we only try to
                                // flush the buffered message, and forgo
                                // reserializing or reencrypting it.
                                outMsg.msg = nil

                                goto retry
                        }

                        // The write succeeded, reset the idle timer to prevent
                        // us from disconnecting the peer.
                        if !idleTimer.Stop() {
                                select {
                                case <-idleTimer.C:
                                default:
                                }
                        }
                        idleTimer.Reset(idleTimeout)

                        // If the peer requested a synchronous write, respond
                        // with the error.
                        if outMsg.errChan != nil {
                                outMsg.errChan <- err
                        }

                        if err != nil {
                                exitErr = fmt.Errorf("unable to write "+
                                        "message: %v", err)
                                break out
                        }

                case <-p.cg.Done():
                        exitErr = lnpeer.ErrPeerExiting
                        break out
                }
        }

        // Avoid an exit deadlock by ensuring WaitGroups are decremented before
        // disconnect.
        p.cg.WgDone()

        p.Disconnect(exitErr)

        p.log.Trace("writeHandler for peer done")
}

// queueHandler is responsible for accepting messages from outside subsystems
// to be eventually sent out on the wire by the writeHandler.
//
// NOTE: This method MUST be run as a goroutine.
func (p *Brontide) queueHandler() {
        defer p.cg.WgDone()

        // priorityMsgs holds an in-order list of messages deemed high-priority
        // to be added to the sendQueue. This predominantly includes messages
        // from the funding manager and htlcswitch.
        priorityMsgs := list.New()

        // lazyMsgs holds an in-order list of messages deemed low-priority to be
        // added to the sendQueue only after all high-priority messages have
        // been queued. This predominantly includes messages from the gossiper.
        lazyMsgs := list.New()

        for {
                // Examine the front of the priority queue, if it is empty check
                // the low priority queue.
                elem := priorityMsgs.Front()
                if elem == nil {
                        elem = lazyMsgs.Front()
                }

                if elem != nil {
                        front := elem.Value.(outgoingMsg)

                        // There's an element on the queue, try adding
                        // it to the sendQueue. We also watch for
                        // messages on the outgoingQueue, in case the
                        // writeHandler cannot accept messages on the
                        // sendQueue.
                        select {
                        case p.sendQueue <- front:
                                if front.priority {
                                        priorityMsgs.Remove(elem)
                                } else {
                                        lazyMsgs.Remove(elem)
                                }
                        case msg := <-p.outgoingQueue:
                                if msg.priority {
                                        priorityMsgs.PushBack(msg)
                                } else {
                                        lazyMsgs.PushBack(msg)
                                }
                        case <-p.cg.Done():
                                return
                        }
                } else {
                        // If there weren't any messages to send to the
                        // writeHandler, then we'll accept a new message
                        // into the queue from outside sub-systems.
                        select {
                        case msg := <-p.outgoingQueue:
                                if msg.priority {
                                        priorityMsgs.PushBack(msg)
                                } else {
                                        lazyMsgs.PushBack(msg)
                                }
                        case <-p.cg.Done():
                                return
                        }
                }
        }
}
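
// To make the scheduling above concrete: if priorityMsgs holds [A, B] and
// lazyMsgs holds [C] while the writeHandler is draining the sendQueue, the
// resulting wire order is A, B, C. Lazy messages are only handed off once
// the priority list is empty (an illustrative trace of the loop above, not
// additional behavior).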

// PingTime returns the estimated ping time to the peer in microseconds.
func (p *Brontide) PingTime() int64 {
        return p.pingManager.GetPingTimeMicroSeconds()
}

// queueMsg adds the lnwire.Message to the back of the high priority send queue.
// If the errChan is non-nil, an error is sent back if the msg failed to queue
// or failed to write, and nil otherwise.
func (p *Brontide) queueMsg(msg lnwire.Message, errChan chan error) {
        p.queue(true, msg, errChan)
}

// queueMsgLazy adds the lnwire.Message to the back of the low priority send
// queue. If the errChan is non-nil, an error is sent back if the msg failed to
// queue or failed to write, and nil otherwise.
func (p *Brontide) queueMsgLazy(msg lnwire.Message, errChan chan error) {
        p.queue(false, msg, errChan)
}

// queue sends a given message to the queueHandler using the passed priority. If
// the errChan is non-nil, an error is sent back if the msg failed to queue or
// failed to write, and nil otherwise.
func (p *Brontide) queue(priority bool, msg lnwire.Message,
        errChan chan error) {

        select {
        case p.outgoingQueue <- outgoingMsg{priority, msg, errChan}:
        case <-p.cg.Done():
                p.log.Tracef("Peer shutting down, could not enqueue msg: %v.",
                        spew.Sdump(msg))
                if errChan != nil {
                        errChan <- lnpeer.ErrPeerExiting
                }
        }
}
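
// A short usage sketch (caller-side pattern only; the methods are the ones
// defined above): to send a message and block until it has been written,
// supply a buffered error channel; pass nil for fire-and-forget sends:
//
//        errChan := make(chan error, 1)
//        p.queueMsg(msg, errChan)
//        if err := <-errChan; err != nil {
//                // The message failed to queue or failed to write.
//        }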

// ChannelSnapshots returns a slice of channel snapshots detailing all
// currently active channels maintained with the remote peer.
func (p *Brontide) ChannelSnapshots() []*channeldb.ChannelSnapshot {
        snapshots := make(
                []*channeldb.ChannelSnapshot, 0, p.activeChannels.Len(),
        )

        p.activeChannels.ForEach(func(_ lnwire.ChannelID,
                activeChan *lnwallet.LightningChannel) error {

                // If the activeChan is nil, then we skip it as the channel is
                // pending.
                if activeChan == nil {
                        return nil
                }

                // We'll only return a snapshot for channels that are
                // *immediately* available for routing payments over.
                if activeChan.RemoteNextRevocation() == nil {
                        return nil
                }

                snapshot := activeChan.StateSnapshot()
                snapshots = append(snapshots, snapshot)

                return nil
        })

        return snapshots
}

// genDeliveryScript returns a new script to be used to send our funds to in
// the case of a cooperative channel close negotiation.
func (p *Brontide) genDeliveryScript() ([]byte, error) {
        // We'll send a normal p2wkh address unless we've negotiated the
        // shutdown-any-segwit feature.
        addrType := lnwallet.WitnessPubKey
        if p.taprootShutdownAllowed() {
                addrType = lnwallet.TaprootPubkey
        }

        deliveryAddr, err := p.cfg.Wallet.NewAddress(
                addrType, false, lnwallet.DefaultAccountName,
        )
        if err != nil {
                return nil, err
        }
        p.log.Infof("Delivery addr for channel close: %v",
                deliveryAddr)

        return txscript.PayToAddrScript(deliveryAddr)
}

// channelManager is a goroutine dedicated to handling all requests/signals
// pertaining to the opening, cooperative closing, and force closing of all
// channels maintained with the remote peer.
//
// NOTE: This method MUST be run as a goroutine.
func (p *Brontide) channelManager() {
        defer p.cg.WgDone()

        // reenableTimeout will fire once after the configured channel status
        // interval has elapsed. This will trigger us to sign new channel
        // updates and broadcast them with the "disabled" flag unset.
        reenableTimeout := time.After(p.cfg.ChanActiveTimeout)

out:
        for {
                select {
                // A new pending channel has arrived which means we are about
                // to complete a funding workflow and are waiting for the final
                // `ChannelReady` messages to be exchanged. We will add this
                // channel to the `activeChannels` with a nil value to indicate
                // this is a pending channel.
                case req := <-p.newPendingChannel:
                        p.handleNewPendingChannel(req)

                // A new channel has arrived which means we've just completed a
                // funding workflow. We'll initialize the necessary local
                // state, and notify the htlc switch of a new link.
                case req := <-p.newActiveChannel:
                        p.handleNewActiveChannel(req)

                // The funding flow for a pending channel has failed, so we
                // will remove it from Brontide.
                case req := <-p.removePendingChannel:
                        p.handleRemovePendingChannel(req)

                // We've just received a local request to close an active
                // channel. It will either kick off a cooperative channel
                // closure negotiation, or be a notification of a breached
                // contract that should be abandoned.
                case req := <-p.localCloseChanReqs:
                        p.handleLocalCloseReq(req)

                // We've received a link failure from a link that was added to
                // the switch. This will initiate the teardown of the link, and
                // initiate any on-chain closures if necessary.
                case failure := <-p.linkFailures:
                        p.handleLinkFailure(failure)

                // We've received a new cooperative channel closure related
                // message from the remote peer, we'll use this message to
                // advance the chan closer state machine.
                case closeMsg := <-p.chanCloseMsgs:
                        p.handleCloseMsg(closeMsg)

                // The channel reannounce delay has elapsed, broadcast the
                // reenabled channel updates to the network. This should only
                // fire once, so we set the reenableTimeout channel to nil to
                // mark it for garbage collection. If the peer is torn down
                // before firing, reenabling will not be attempted.
                // TODO(conner): consolidate reenables timers inside chan status
                // manager
                case <-reenableTimeout:
                        p.reenableActiveChannels()

                        // Since this channel will never fire again during the
                        // lifecycle of the peer, we nil the channel to mark it
                        // eligible for garbage collection, and make this
                        // explicitly ineligible to receive in future calls to
                        // select. This also shaves a few CPU cycles since the
                        // select will ignore this case entirely.
                        reenableTimeout = nil

                        // Once the reenabling is attempted, we also cancel the
                        // channel event subscription to free up the overflow
                        // queue used in channel notifier.
                        //
                        // NOTE: channelEventClient will be nil if the
                        // reenableTimeout is greater than 1 minute.
                        if p.channelEventClient != nil {
                                p.channelEventClient.Cancel()
                        }

                case <-p.cg.Done():
                        // As we've been signalled to exit, we'll reset all our
                        // active channels back to their default state.
                        p.activeChannels.ForEach(func(_ lnwire.ChannelID,
                                lc *lnwallet.LightningChannel) error {

                                // Exit if the channel is nil as it's a pending
                                // channel.
                                if lc == nil {
                                        return nil
                                }

                                lc.ResetState()

                                return nil
                        })

                        break out
                }
        }
}

// reenableActiveChannels searches the index of channels maintained with this
// peer, and reenables each public, non-pending channel. This is done at the
// gossip level by broadcasting a new ChannelUpdate with the disabled bit unset.
// No message will be sent if the channel is already enabled.
func (p *Brontide) reenableActiveChannels() {
        // First, filter all known channels with this peer for ones that are
        // both public and not pending.
        activePublicChans := p.filterChannelsToEnable()

        // Create a map to hold channels that need to be retried.
        retryChans := make(map[wire.OutPoint]struct{}, len(activePublicChans))

        // For each of the public, non-pending channels, set the channel
        // disabled bit to false and send out a new ChannelUpdate. If this
        // channel is already active, the update won't be sent.
        for _, chanPoint := range activePublicChans {
                err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint, false)

                switch {
                // No error occurred, continue to request the next channel.
                case err == nil:
                        continue

                // Cannot auto enable a manually disabled channel so we do
                // nothing but proceed to the next channel.
                case errors.Is(err, netann.ErrEnableManuallyDisabledChan):
                        p.log.Debugf("Channel(%v) was manually disabled, "+
                                "ignoring automatic enable request", chanPoint)

                        continue

                // If the channel is reported as inactive, we will give it
                // another chance. When handling the request, ChanStatusManager
                // will check whether the link is active or not. One of the
                // conditions is whether the link has been marked as
                // reestablished, which happens inside a goroutine (htlcManager)
                // after the link is started. And we may get a false negative
                // saying the link is not active because that goroutine hasn't
                // reached the line to mark the reestablishment. Thus we give
                // it a second chance to send the request.
                case errors.Is(err, netann.ErrEnableInactiveChan):
                        // If we don't have a client created, it means we
                        // shouldn't retry enabling the channel.
                        if p.channelEventClient == nil {
                                p.log.Errorf("Channel(%v) request enabling "+
                                        "failed due to inactive link",
                                        chanPoint)

                                continue
                        }

                        p.log.Warnf("Channel(%v) cannot be enabled as "+
                                "ChanStatusManager reported inactive, "+
                                "retrying", chanPoint)

                        // Add the channel to the retry map.
                        retryChans[chanPoint] = struct{}{}
                }
        }

        // Retry the channels if we have any.
        if len(retryChans) != 0 {
                p.retryRequestEnable(retryChans)
        }
}

// fetchActiveChanCloser attempts to fetch the active chan closer state machine
// for the target channel ID. If the channel isn't active an error is returned.
// Otherwise, either an existing state machine will be returned, or a new one
// will be created.
func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
        *chanCloserFsm, error) {

        chanCloser, found := p.activeChanCloses.Load(chanID)
        if found {
                // An entry will only be found if the closer has already been
                // created for a non-pending channel or for a channel that had
                // previously started the shutdown process but the connection
                // was restarted.
                return &chanCloser, nil
        }

        // First, we'll ensure that we actually know of the target channel. If
        // not, we'll ignore this message.
        channel, ok := p.activeChannels.Load(chanID)

        // If the channel isn't in the map or the channel is nil, return
        // ErrChannelNotFound as the channel is pending.
        if !ok || channel == nil {
                return nil, ErrChannelNotFound
        }

        // We'll create a valid closing state machine in order to respond to
        // the initiated cooperative channel closure. First, we set the
        // delivery script that our funds will be paid out to. If an upfront
        // shutdown script was set, we will use it. Otherwise, we get a fresh
        // delivery script.
        //
        // TODO: Expose option to allow upfront shutdown script from watch-only
        // accounts.
        deliveryScript := channel.LocalUpfrontShutdownScript()
        if len(deliveryScript) == 0 {
                var err error
                deliveryScript, err = p.genDeliveryScript()
                if err != nil {
                        p.log.Errorf("unable to gen delivery script: %v",
                                err)
                        return nil, fmt.Errorf("close addr unavailable")
                }
        }

        // In order to begin fee negotiations, we'll first compute our target
        // ideal fee-per-kw.
        feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
                p.cfg.CoopCloseTargetConfs,
        )
        if err != nil {
                p.log.Errorf("unable to query fee estimator: %v", err)
                return nil, fmt.Errorf("unable to estimate fee")
        }

        addr, err := p.addrWithInternalKey(deliveryScript)
        if err != nil {
                return nil, fmt.Errorf("unable to parse addr: %w", err)
        }
        negotiateChanCloser, err := p.createChanCloser(
                channel, addr, feePerKw, nil, lntypes.Remote,
        )
        if err != nil {
                p.log.Errorf("unable to create chan closer: %v", err)
                return nil, fmt.Errorf("unable to create chan closer")
        }

        chanCloser = makeNegotiateCloser(negotiateChanCloser)

        p.activeChanCloses.Store(chanID, chanCloser)

        return &chanCloser, nil
}

// filterChannelsToEnable filters a list of channels to be enabled upon start.
// The filtered channels are active channels that are neither private nor
// pending.
func (p *Brontide) filterChannelsToEnable() []wire.OutPoint {
        var activePublicChans []wire.OutPoint

        p.activeChannels.Range(func(chanID lnwire.ChannelID,
                lnChan *lnwallet.LightningChannel) bool {

                // If the lnChan is nil, continue as this is a pending channel.
                if lnChan == nil {
                        return true
                }

                dbChan := lnChan.State()
                isPublic := dbChan.ChannelFlags&lnwire.FFAnnounceChannel != 0
                if !isPublic || dbChan.IsPending {
                        return true
                }

                // We'll also skip any channels added during this peer's
                // lifecycle since they haven't waited out the timeout. Their
                // first announcement will be enabled, and the chan status
                // manager will begin monitoring them passively since they exist
                // in the database.
                if _, ok := p.addedChannels.Load(chanID); ok {
                        return true
                }

                activePublicChans = append(
                        activePublicChans, dbChan.FundingOutpoint,
                )

                return true
        })

        return activePublicChans
}

// retryRequestEnable takes a map of channel outpoints and a channel event
// client. It listens to the channel events and removes a channel from the map
// if it's matched to the event. Upon receiving an active channel event, it
// will send the enabling request again.
func (p *Brontide) retryRequestEnable(activeChans map[wire.OutPoint]struct{}) {
        p.log.Debugf("Retry enabling %v channels", len(activeChans))

        // retryEnable is a helper closure that sends an enable request and
        // removes the channel from the map if it's matched.
        retryEnable := func(chanPoint wire.OutPoint) error {
                // If this is an active channel event, check whether it's in
                // our targeted channels map.
                _, found := activeChans[chanPoint]

                // If this channel is irrelevant, return nil so the loop can
                // jump to the next iteration.
                if !found {
                        return nil
                }

                // Otherwise we've just received an active signal for a channel
                // that previously failed to be enabled, so we send the request
                // again.
                //
                // We only give the channel one more shot, so we delete it from
                // our map first to keep it from being attempted again.
                delete(activeChans, chanPoint)

                // Send the request.
                err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint, false)
                if err != nil {
                        return fmt.Errorf("request enabling channel %v "+
                                "failed: %w", chanPoint, err)
                }

                return nil
        }

        for {
                // If activeChans is empty, we're done processing all the
                // channels.
                if len(activeChans) == 0 {
                        p.log.Debug("Finished retry enabling channels")
                        return
                }

                select {
                // A new event has been sent by the ChannelNotifier. We now
                // check whether it's an active or inactive channel event.
                case e := <-p.channelEventClient.Updates():
                        // If this is an active channel event, try to enable
                        // the channel, then jump to the next iteration.
                        active, ok := e.(channelnotifier.ActiveChannelEvent)
                        if ok {
                                chanPoint := *active.ChannelPoint

                                // If we received an error for this particular
                                // channel, we log an error and won't quit as
                                // we still want to retry other channels.
                                if err := retryEnable(chanPoint); err != nil {
                                        p.log.Errorf("Retry failed: %v", err)
                                }

                                continue
                        }

                        // Otherwise check for an inactive link event, and jump
                        // to the next iteration if it's not one.
                        inactive, ok := e.(channelnotifier.InactiveLinkEvent)
                        if !ok {
                                continue
                        }

                        // We found an inactive link event; if this is our
                        // targeted channel, remove it from our map.
                        chanPoint := *inactive.ChannelPoint
                        _, found := activeChans[chanPoint]
                        if !found {
                                continue
                        }

                        delete(activeChans, chanPoint)
                        p.log.Warnf("Re-enabling channel %v failed, received "+
                                "inactive link event", chanPoint)

                case <-p.cg.Done():
                        p.log.Debugf("Peer shutdown during retry enabling")
                        return
                }
        }
}

// chooseDeliveryScript takes two optionally set shutdown scripts and returns
// a suitable script to close out to. This may be nil if neither script is
// set. If both scripts are set, this function will error if they do not match.
func chooseDeliveryScript(upfront, requested lnwire.DeliveryAddress,
        genDeliveryScript func() ([]byte, error),
) (lnwire.DeliveryAddress, error) {

        switch {
        // If no script was provided, then we'll generate a new delivery script.
        case len(upfront) == 0 && len(requested) == 0:
                return genDeliveryScript()

        // If no upfront shutdown script was provided, return the user
        // requested address (which may be nil).
        case len(upfront) == 0:
                return requested, nil

        // If an upfront shutdown script was provided, and the user did not
        // request a custom shutdown script, return the upfront address.
        case len(requested) == 0:
                return upfront, nil

        // If both an upfront shutdown script and a custom close script were
        // provided, error if the user provided shutdown script does not match
        // the upfront shutdown script (because closing out to a different
        // script would violate upfront shutdown).
        case !bytes.Equal(upfront, requested):
                return nil, chancloser.ErrUpfrontShutdownScriptMismatch

        // The user requested script matches the upfront shutdown script, so we
        // can return it without error.
        default:
                return upfront, nil
        }
}
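
// The resolution above can be summarized as follows, where B and C are
// hypothetical, distinct scripts:
//
//        upfront  requested  result
//        -------  ---------  ------
//        nil      nil        genDeliveryScript()
//        nil      C          C
//        B        nil        B
//        B        B          B
//        B        C          ErrUpfrontShutdownScriptMismatch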

// restartCoopClose checks whether we need to restart the cooperative close
// process for a given channel.
func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) (
        *lnwire.Shutdown, error) {

        isTaprootChan := lnChan.ChanType().IsTaproot()

        // If this channel has status ChanStatusCoopBroadcasted and does not
        // have a closing transaction, then the cooperative close process was
        // started but never finished. We'll re-create the chanCloser state
        // machine and resend Shutdown. BOLT#2 requires that we retransmit
        // Shutdown exactly, but doing so would mean persisting the RPC
        // provided close script. Instead use the LocalUpfrontShutdownScript
        // or generate a script.
        c := lnChan.State()
        _, err := c.BroadcastedCooperative()
        if err != nil && err != channeldb.ErrNoCloseTx {
                // An error other than ErrNoCloseTx was encountered.
                return nil, err
        } else if err == nil && !p.rbfCoopCloseAllowed() {
                // This is a channel that doesn't support RBF coop close, and it
                // already had a coop close txn broadcast. As a result, we can
                // just exit here as all we can do is wait for it to confirm.
                return nil, nil
        }

        chanID := lnwire.NewChanIDFromOutPoint(c.FundingOutpoint)

        var deliveryScript []byte

        shutdownInfo, err := c.ShutdownInfo()
        switch {
        // We have previously stored the delivery script that we need to use
        // in the shutdown message. Re-use this script.
        case err == nil:
                shutdownInfo.WhenSome(func(info channeldb.ShutdownInfo) {
                        deliveryScript = info.DeliveryScript.Val
                })

        // An error other than ErrNoShutdownInfo was returned.
        case !errors.Is(err, channeldb.ErrNoShutdownInfo):
                return nil, err

        case errors.Is(err, channeldb.ErrNoShutdownInfo):
                deliveryScript = c.LocalShutdownScript
                if len(deliveryScript) == 0 {
                        var err error
                        deliveryScript, err = p.genDeliveryScript()
                        if err != nil {
                                p.log.Errorf("unable to gen delivery script: "+
                                        "%v", err)

                                return nil, fmt.Errorf("close addr unavailable")
                        }
                }
        }

        // If the new RBF co-op close is negotiated, then we'll init and start
        // that state machine, skipping the steps for the negotiate machine
        // below. We don't support this close type for taproot channels though.
        if p.rbfCoopCloseAllowed() && !isTaprootChan {
                _, err := p.initRbfChanCloser(lnChan)
                if err != nil {
                        return nil, fmt.Errorf("unable to init rbf chan "+
                                "closer during restart: %w", err)
                }

                shutdownDesc := fn.MapOption(
                        newRestartShutdownInit,
                )(shutdownInfo)

                err = p.startRbfChanCloser(
                        fn.FlattenOption(shutdownDesc), lnChan.ChannelPoint(),
                )

                return nil, err
        }

        // Compute an ideal fee.
        feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
                p.cfg.CoopCloseTargetConfs,
        )
        if err != nil {
                p.log.Errorf("unable to query fee estimator: %v", err)
                return nil, fmt.Errorf("unable to estimate fee")
        }

        // Determine whether we or the peer are the initiator of the coop
        // close attempt by looking at the channel's status.
        closingParty := lntypes.Remote
        if c.HasChanStatus(channeldb.ChanStatusLocalCloseInitiator) {
                closingParty = lntypes.Local
        }

        addr, err := p.addrWithInternalKey(deliveryScript)
        if err != nil {
                return nil, fmt.Errorf("unable to parse addr: %w", err)
        }
        chanCloser, err := p.createChanCloser(
                lnChan, addr, feePerKw, nil, closingParty,
        )
        if err != nil {
                p.log.Errorf("unable to create chan closer: %v", err)
                return nil, fmt.Errorf("unable to create chan closer")
        }

        p.activeChanCloses.Store(chanID, makeNegotiateCloser(chanCloser))

        // Create the Shutdown message.
        shutdownMsg, err := chanCloser.ShutdownChan()
        if err != nil {
                p.log.Errorf("unable to create shutdown message: %v", err)
                p.activeChanCloses.Delete(chanID)
                return nil, err
        }

        return shutdownMsg, nil
}

// createChanCloser constructs a ChanCloser from the passed parameters and is
// used to de-duplicate code.
func (p *Brontide) createChanCloser(channel *lnwallet.LightningChannel,
        deliveryScript *chancloser.DeliveryAddrWithKey,
        fee chainfee.SatPerKWeight, req *htlcswitch.ChanClose,
        closer lntypes.ChannelParty) (*chancloser.ChanCloser, error) {

        _, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
        if err != nil {
                p.log.Errorf("unable to obtain best block: %v", err)
                return nil, fmt.Errorf("cannot obtain best block")
        }

        // The req will only be set if we initiated the co-op closing flow.
        var maxFee chainfee.SatPerKWeight
        if req != nil {
                maxFee = req.MaxFee
        }

        chanCloser := chancloser.NewChanCloser(
                chancloser.ChanCloseCfg{
                        Channel:      channel,
                        MusigSession: NewMusigChanCloser(channel),
                        FeeEstimator: &chancloser.SimpleCoopFeeEstimator{},
                        BroadcastTx:  p.cfg.Wallet.PublishTransaction,
                        AuxCloser:    p.cfg.AuxChanCloser,
                        DisableChannel: func(op wire.OutPoint) error {
                                return p.cfg.ChanStatusMgr.RequestDisable(
                                        op, false,
                                )
                        },
                        MaxFee: maxFee,
                        Disconnect: func() error {
                                return p.cfg.DisconnectPeer(p.IdentityKey())
                        },
                        ChainParams: &p.cfg.Wallet.Cfg.NetParams,
                },
                *deliveryScript,
                fee,
                uint32(startingHeight),
                req,
                closer,
        )

        return chanCloser, nil
}

// initNegotiateChanCloser initializes the channel closer for a channel that is
// using the original "negotiation" based protocol. This path is used when
// we're the one initiating the channel close.
//
// TODO(roasbeef): can make a MsgEndpoint for existing handling logic to
// further abstract.
func (p *Brontide) initNegotiateChanCloser(req *htlcswitch.ChanClose,
        channel *lnwallet.LightningChannel) error {

        // First, we'll choose a delivery address that we'll use to send the
        // funds to in the case of a successful negotiation.

        // An upfront shutdown and user provided script are both optional, but
        // must be equal if both are set (because we cannot serve a request to
        // close out to a script which violates upfront shutdown). Get the
        // appropriate address to close out to (which may be nil if neither is
        // set) and error if they are both set and do not match.
        deliveryScript, err := chooseDeliveryScript(
                channel.LocalUpfrontShutdownScript(), req.DeliveryScript,
                p.genDeliveryScript,
        )
        if err != nil {
                return fmt.Errorf("cannot close channel %v: %w",
                        req.ChanPoint, err)
        }

        addr, err := p.addrWithInternalKey(deliveryScript)
        if err != nil {
                return fmt.Errorf("unable to parse addr for channel "+
                        "%v: %w", req.ChanPoint, err)
        }

        chanCloser, err := p.createChanCloser(
                channel, addr, req.TargetFeePerKw, req, lntypes.Local,
        )
        if err != nil {
                return fmt.Errorf("unable to make chan closer: %w", err)
        }

        chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint())
        p.activeChanCloses.Store(chanID, makeNegotiateCloser(chanCloser))

        // Finally, we'll initiate the channel shutdown within the
        // chanCloser, and send the shutdown message to the remote
        // party to kick things off.
        shutdownMsg, err := chanCloser.ShutdownChan()
        if err != nil {
                // As we were unable to shutdown the channel, we'll return it
                // back to its normal state.
                defer channel.ResetState()

                p.activeChanCloses.Delete(chanID)

                return fmt.Errorf("unable to shutdown channel: %w", err)
        }

        link := p.fetchLinkFromKeyAndCid(chanID)
        if link == nil {
                // If the link is nil then it means it was already removed from
                // the switch or it never existed in the first place. The
                // latter case is handled at the beginning of this function, so
                // in the case where it has already been removed, we can skip
                // adding the commit hook to queue a Shutdown message.
                p.log.Warnf("link not found during attempted closure: "+
                        "%v", chanID)
                return nil
        }

        if !link.DisableAdds(htlcswitch.Outgoing) {
                p.log.Warnf("Outgoing link adds already "+
                        "disabled: %v", link.ChanID())
        }

        link.OnCommitOnce(htlcswitch.Outgoing, func() {
                p.queueMsg(shutdownMsg, nil)
        })

        return nil
}
3516

3517

// chooseAddr returns the provided address if it is non-zero length, otherwise
// None.
func chooseAddr(addr lnwire.DeliveryAddress) fn.Option[lnwire.DeliveryAddress] {
        if len(addr) == 0 {
                return fn.None[lnwire.DeliveryAddress]()
        }

        return fn.Some(addr)
}
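
// Illustrative sketch (editorial addition, not in the upstream file):
// chooseAddr lets the RBF closer environment below treat "no upfront shutdown
// script" and "script present" uniformly as an fn.Option:
//
//	chooseAddr(nil)                          // fn.None[lnwire.DeliveryAddress]()
//	chooseAddr(lnwire.DeliveryAddress{0x00}) // fn.Some(...)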

// observeRbfCloseUpdates observes the channel for any updates that may
// indicate that a new txid has been broadcasted, or the channel fully closed
// on chain.
func (p *Brontide) observeRbfCloseUpdates(chanCloser *chancloser.RbfChanCloser,
        closeReq *htlcswitch.ChanClose,
        coopCloseStates chancloser.RbfStateSub) {

        newStateChan := coopCloseStates.NewItemCreated.ChanOut()
        defer chanCloser.RemoveStateSub(coopCloseStates)

        var (
                lastTxids    lntypes.Dual[chainhash.Hash]
                lastFeeRates lntypes.Dual[chainfee.SatPerVByte]
        )

        maybeNotifyTxBroadcast := func(state chancloser.AsymmetricPeerState,
                party lntypes.ChannelParty) {

                // First, check to see if we have an error to report to the
                // caller. If so, then we'll return that error and exit, as the
                // stream will exit as well.
                if closeErr, ok := state.(*chancloser.CloseErr); ok {
                        // We hit an error during the last state transition, so
                        // we'll extract the error then send it to the
                        // user.
                        err := closeErr.Err()

                        peerLog.Warnf("ChannelPoint(%v): encountered close "+
                                "err: %v", closeReq.ChanPoint, err)

                        select {
                        case closeReq.Err <- err:
                        case <-closeReq.Ctx.Done():
                        case <-p.cg.Done():
                        }

                        return
                }

                closePending, ok := state.(*chancloser.ClosePending)

                // If this isn't the close pending state, we aren't at the
                // terminal state yet.
                if !ok {
                        return
                }

                // Only notify if the fee rate is greater.
                newFeeRate := closePending.FeeRate
                lastFeeRate := lastFeeRates.GetForParty(party)
                if newFeeRate <= lastFeeRate {
                        peerLog.Debugf("ChannelPoint(%v): remote party made "+
                                "update for fee rate %v, but we already have "+
                                "a higher fee rate of %v", closeReq.ChanPoint,
                                newFeeRate, lastFeeRate)

                        return
                }

                feeRate := closePending.FeeRate
                lastFeeRates.SetForParty(party, feeRate)

                // At this point, we'll have a txid that we can use to notify
                // the client, but only if it's different from the last one we
                // sent, so that a bump rejected under the RBF rules doesn't
                // produce a redundant update.
                closingTxid := closePending.CloseTx.TxHash()
                lastTxid := lastTxids.GetForParty(party)
                if closeReq != nil && closingTxid != lastTxid {
                        select {
                        case closeReq.Updates <- &PendingUpdate{
                                Txid:        closingTxid[:],
                                FeePerVbyte: fn.Some(closePending.FeeRate),
                                IsLocalCloseTx: fn.Some(
                                        party == lntypes.Local,
                                ),
                        }:

                        case <-closeReq.Ctx.Done():
                                return

                        case <-p.cg.Done():
                                return
                        }
                }

                lastTxids.SetForParty(party, closingTxid)
        }

        peerLog.Infof("Observing RBF close updates for channel %v",
                closeReq.ChanPoint)

        // We'll consume each new incoming state to send out the appropriate
        // RPC update.
        for {
                select {
                case newState := <-newStateChan:

                        switch closeState := newState.(type) {
                        // Once we've reached the state of pending close, we
                        // have a txid that we broadcasted.
                        case *chancloser.ClosingNegotiation:
                                peerState := closeState.PeerState

                                // Each side may have gained a new co-op close
                                // tx, so we'll examine both to see if they've
                                // changed.
                                maybeNotifyTxBroadcast(
                                        peerState.GetForParty(lntypes.Local),
                                        lntypes.Local,
                                )
                                maybeNotifyTxBroadcast(
                                        peerState.GetForParty(lntypes.Remote),
                                        lntypes.Remote,
                                )

                        // Otherwise, if we transition to CloseFin, then we
                        // know that we're done.
                        case *chancloser.CloseFin:
                                // To clean up, we'll remove the chan closer
                                // from the active map, and send the final
                                // update to the client.
                                closingTxid := closeState.ConfirmedTx.TxHash()
                                if closeReq != nil {
                                        closeReq.Updates <- &ChannelCloseUpdate{
                                                ClosingTxid: closingTxid[:],
                                                Success:     true,
                                        }
                                }
                                chanID := lnwire.NewChanIDFromOutPoint(
                                        *closeReq.ChanPoint,
                                )
                                p.activeChanCloses.Delete(chanID)

                                return
                        }

                case <-closeReq.Ctx.Done():
                        return

                case <-p.cg.Done():
                        return
                }
        }
}
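
// A minimal sketch (editorial addition) of the state-subscription pattern
// consumed above, using only the RegisterStateEvents/RemoveStateSub API that
// appears in this file:
//
//	sub := chanCloser.RegisterStateEvents()
//	defer chanCloser.RemoveStateSub(sub)
//	for state := range sub.NewItemCreated.ChanOut() {
//		// Type-switch on state, e.g. *chancloser.ClosingNegotiation
//		// or *chancloser.CloseFin, as done above.
//	}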

// chanErrorReporter is a simple implementation of the
// chancloser.ErrorReporter. This is bound to a single channel by the channel
// ID.
type chanErrorReporter struct {
        chanID lnwire.ChannelID
        peer   *Brontide
}

// newChanErrorReporter creates a new instance of the chanErrorReporter.
func newChanErrorReporter(chanID lnwire.ChannelID,
        peer *Brontide) *chanErrorReporter {

        return &chanErrorReporter{
                chanID: chanID,
                peer:   peer,
        }
}

// ReportError is a method that's used to report an error that occurred during
// state machine execution. This is used by the RBF close state machine to
// terminate the state machine and send an error to the remote peer.
//
// This is a part of the chancloser.ErrorReporter interface.
func (c *chanErrorReporter) ReportError(chanErr error) {
        c.peer.log.Errorf("coop close error for channel %v: %v",
                c.chanID, chanErr)

        var errMsg []byte
        if errors.Is(chanErr, chancloser.ErrInvalidStateTransition) {
                errMsg = []byte("unexpected protocol message")
        } else {
                errMsg = []byte(chanErr.Error())
        }

        err := c.peer.SendMessageLazy(false, &lnwire.Error{
                ChanID: c.chanID,
                Data:   errMsg,
        })
        if err != nil {
                c.peer.log.Warnf("unable to send error message to peer: %v",
                        err)
        }

        // After we send the error message to the peer, we'll re-initialize the
        // coop close state machine as they may send a shutdown message to
        // retry the coop close.
        lnChan, ok := c.peer.activeChannels.Load(c.chanID)
        if !ok {
                return
        }

        if lnChan == nil {
                c.peer.log.Debugf("channel %v is pending, not "+
                        "re-initializing coop close state machine",
                        c.chanID)

                return
        }

        if _, err := c.peer.initRbfChanCloser(lnChan); err != nil {
                c.peer.activeChanCloses.Delete(c.chanID)

                c.peer.log.Errorf("unable to init RBF chan closer after "+
                        "error case: %v", err)
        }
}
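
// Compile-time check (editorial addition, not in the upstream file) that
// chanErrorReporter satisfies the interface named in its doc comment; the
// authoritative interface definition lives in the chancloser package.
var _ chancloser.ErrorReporter = (*chanErrorReporter)(nil)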

// chanFlushEventSentinel is used to send the RBF coop close state machine the
// channel flushed event. We'll wait until the state machine enters the
// ChannelFlushing state, then request the link to send the event once flushed.
//
// NOTE: This MUST be run as a goroutine.
func (p *Brontide) chanFlushEventSentinel(chanCloser *chancloser.RbfChanCloser,
        link htlcswitch.ChannelUpdateHandler,
        channel *lnwallet.LightningChannel) {

        defer p.cg.WgDone()

        // If there's no link, then the channel has already been flushed, so we
        // don't need to continue.
        if link == nil {
                return
        }

        coopCloseStates := chanCloser.RegisterStateEvents()
        defer chanCloser.RemoveStateSub(coopCloseStates)

        newStateChan := coopCloseStates.NewItemCreated.ChanOut()

        sendChanFlushed := func() {
                chanState := channel.StateSnapshot()

                peerLog.Infof("ChannelPoint(%v) has been flushed for co-op "+
                        "close, sending event to chan closer",
                        channel.ChannelPoint())

                chanBalances := chancloser.ShutdownBalances{
                        LocalBalance:  chanState.LocalBalance,
                        RemoteBalance: chanState.RemoteBalance,
                }
                ctx := context.Background()
                chanCloser.SendEvent(ctx, &chancloser.ChannelFlushed{
                        ShutdownBalances: chanBalances,
                        FreshFlush:       true,
                })
        }

        // We'll wait until the channel enters the ChannelFlushing state. We
        // exit after the first successful iteration, as after the first RBF
        // iteration the channel will always be flushed.
        for newState := range newStateChan {
                if _, ok := newState.(*chancloser.ChannelFlushing); ok {
                        peerLog.Infof("ChannelPoint(%v): rbf coop "+
                                "close is awaiting a flushed state, "+
                                "registering with link...",
                                channel.ChannelPoint())

                        // Request the link to send the event once the channel
                        // is flushed. We only need this event sent once, so we
                        // can exit now.
                        link.OnFlushedOnce(sendChanFlushed)

                        return
                }
        }
}
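
// Launch sketch (editorial addition), mirroring how initRbfChanCloser below
// starts this sentinel; the WgAdd/WgDone pairing is what makes the "MUST be
// run as a goroutine" note above safe:
//
//	p.cg.WgAdd(1)
//	go p.chanFlushEventSentinel(&chanCloser, link, channel)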

// initRbfChanCloser initializes the channel closer for a channel that
// is using the new RBF based co-op close protocol. This only creates the chan
// closer, but doesn't attempt to trigger any manual state transitions.
func (p *Brontide) initRbfChanCloser(
        channel *lnwallet.LightningChannel) (*chancloser.RbfChanCloser, error) {

        chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint())

        link := p.fetchLinkFromKeyAndCid(chanID)

        _, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
        if err != nil {
                return nil, fmt.Errorf("cannot obtain best block: %w", err)
        }

        defaultFeePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
                p.cfg.CoopCloseTargetConfs,
        )
        if err != nil {
                return nil, fmt.Errorf("unable to estimate fee: %w", err)
        }

        thawHeight, err := channel.AbsoluteThawHeight()
        if err != nil {
                return nil, fmt.Errorf("unable to get thaw height: %w", err)
        }

        peerPub := *p.IdentityKey()

        msgMapper := chancloser.NewRbfMsgMapper(
                uint32(startingHeight), chanID, peerPub,
        )

        initialState := chancloser.ChannelActive{}

        scid := channel.ZeroConfRealScid().UnwrapOr(
                channel.ShortChanID(),
        )

        env := chancloser.Environment{
                ChainParams:    p.cfg.Wallet.Cfg.NetParams,
                ChanPeer:       peerPub,
                ChanPoint:      channel.ChannelPoint(),
                ChanID:         chanID,
                Scid:           scid,
                ChanType:       channel.ChanType(),
                DefaultFeeRate: defaultFeePerKw.FeePerVByte(),
                ThawHeight:     fn.Some(thawHeight),
                RemoteUpfrontShutdown: chooseAddr(
                        channel.RemoteUpfrontShutdownScript(),
                ),
                LocalUpfrontShutdown: chooseAddr(
                        channel.LocalUpfrontShutdownScript(),
                ),
                NewDeliveryScript: func() (lnwire.DeliveryAddress, error) {
                        return p.genDeliveryScript()
                },
                FeeEstimator: &chancloser.SimpleCoopFeeEstimator{},
                CloseSigner:  channel,
                ChanObserver: newChanObserver(
                        channel, link, p.cfg.ChanStatusMgr,
                ),
        }

        spendEvent := protofsm.RegisterSpend[chancloser.ProtocolEvent]{
                OutPoint:   channel.ChannelPoint(),
                PkScript:   channel.FundingTxOut().PkScript,
                HeightHint: channel.DeriveHeightHint(),
                PostSpendEvent: fn.Some[chancloser.RbfSpendMapper](
                        chancloser.SpendMapper,
                ),
        }

        daemonAdapters := NewLndDaemonAdapters(LndAdapterCfg{
                MsgSender:     newPeerMsgSender(peerPub, p),
                TxBroadcaster: p.cfg.Wallet,
                ChainNotifier: p.cfg.ChainNotifier,
        })

        protoCfg := chancloser.RbfChanCloserCfg{
                Daemon:        daemonAdapters,
                InitialState:  &initialState,
                Env:           &env,
                InitEvent:     fn.Some[protofsm.DaemonEvent](&spendEvent),
                ErrorReporter: newChanErrorReporter(chanID, p),
                MsgMapper: fn.Some[protofsm.MsgMapper[chancloser.ProtocolEvent]]( //nolint:ll
                        msgMapper,
                ),
        }

        ctx := context.Background()
        chanCloser := protofsm.NewStateMachine(protoCfg)
        chanCloser.Start(ctx)

        // Finally, we'll register this new endpoint with the message router so
        // future co-op close messages are handled by this state machine.
        err = fn.MapOptionZ(p.msgRouter, func(r msgmux.Router) error {
                _ = r.UnregisterEndpoint(chanCloser.Name())

                return r.RegisterEndpoint(&chanCloser)
        })
        if err != nil {
                chanCloser.Stop()

                return nil, fmt.Errorf("unable to register endpoint for co-op "+
                        "close: %w", err)
        }

        p.activeChanCloses.Store(chanID, makeRbfCloser(&chanCloser))

        // Now that we've created the rbf closer state machine, we'll launch a
        // new goroutine to eventually send in the ChannelFlushed event once
        // needed.
        p.cg.WgAdd(1)
        go p.chanFlushEventSentinel(&chanCloser, link, channel)

        return &chanCloser, nil
}
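
// Hypothetical call-site sketch (editorial addition; names as used in this
// file, error handling abbreviated). On success the closer is already stored
// in p.activeChanCloses and registered with the message router, so callers
// typically just hold the returned handle:
//
//	rbfCloser, err := p.initRbfChanCloser(channel)
//	if err != nil {
//		return fmt.Errorf("unable to init RBF closer: %w", err)
//	}
//	_ = rbfCloser // Driven later by startRbfChanCloser below.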

// shutdownInit describes the two ways we can initiate a new shutdown. Either we
// got an RPC request to do so (left), or we sent a shutdown message to the
// party (for whatever reason), but crashed before the close was complete.
//
//nolint:ll
type shutdownInit = fn.Option[fn.Either[*htlcswitch.ChanClose, channeldb.ShutdownInfo]]
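
// Construction sketch (editorial addition) for the two variants; see
// newRPCShutdownInit and newRestartShutdownInit further below:
//
//	viaRPC     := newRPCShutdownInit(req)      // Left: *htlcswitch.ChanClose
//	viaRestart := newRestartShutdownInit(info) // Right: channeldb.ShutdownInfo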

// shutdownStartFeeRate returns the fee rate that should be used for the
// shutdown. This returns a doubly wrapped option as the shutdown info might
// be none, and the fee rate is only defined for the user initiated shutdown.
func shutdownStartFeeRate(s shutdownInit) fn.Option[chainfee.SatPerKWeight] {
        feeRateOpt := fn.MapOption(func(init fn.Either[*htlcswitch.ChanClose,
                channeldb.ShutdownInfo]) fn.Option[chainfee.SatPerKWeight] {

                var feeRate fn.Option[chainfee.SatPerKWeight]
                init.WhenLeft(func(req *htlcswitch.ChanClose) {
                        feeRate = fn.Some(req.TargetFeePerKw)
                })

                return feeRate
        })(s)

        return fn.FlattenOption(feeRateOpt)
}
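
// Unwrap sketch: because the result is doubly wrapped, callers such as
// startRbfChanCloser below fall back to the fee estimator when the option is
// empty:
//
//	feePerKw, err := shutdownStartFeeRate(shutdown).UnwrapOrFuncErr(
//		func() (chainfee.SatPerKWeight, error) {
//			return p.cfg.FeeEstimator.EstimateFeePerKW(
//				p.cfg.CoopCloseTargetConfs,
//			)
//		},
//	)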

// shutdownStartAddr returns the delivery address that should be used when
// restarting the shutdown process. If we didn't send a shutdown before we
// restarted, and the user didn't initiate one either, then None is returned.
func shutdownStartAddr(s shutdownInit) fn.Option[lnwire.DeliveryAddress] {
        addrOpt := fn.MapOption(func(init fn.Either[*htlcswitch.ChanClose,
                channeldb.ShutdownInfo]) fn.Option[lnwire.DeliveryAddress] {

                var addr fn.Option[lnwire.DeliveryAddress]
                init.WhenLeft(func(req *htlcswitch.ChanClose) {
                        if len(req.DeliveryScript) != 0 {
                                addr = fn.Some(req.DeliveryScript)
                        }
                })
                init.WhenRight(func(info channeldb.ShutdownInfo) {
                        addr = fn.Some(info.DeliveryScript.Val)
                })

                return addr
        })(s)

        return fn.FlattenOption(addrOpt)
}

// whenRPCShutdown registers a callback to be executed when the shutdown init
// type is an RPC request.
func whenRPCShutdown(s shutdownInit, f func(r *htlcswitch.ChanClose)) {
        s.WhenSome(func(init fn.Either[*htlcswitch.ChanClose,
                channeldb.ShutdownInfo]) {

                init.WhenLeft(f)
        })
}

// newRestartShutdownInit creates a new shutdownInit for the case where we need
// to restart the shutdown flow after a restart.
func newRestartShutdownInit(info channeldb.ShutdownInfo) shutdownInit {
        return fn.Some(fn.NewRight[*htlcswitch.ChanClose](info))
}

// newRPCShutdownInit creates a new shutdownInit for the case where we
// initiated the shutdown via an RPC client.
func newRPCShutdownInit(req *htlcswitch.ChanClose) shutdownInit {
        return fn.Some(
                fn.NewLeft[*htlcswitch.ChanClose, channeldb.ShutdownInfo](req),
        )
}
3992
// advanced to a terminal state before attempting another fee bump.
3993
func waitUntilRbfCoastClear(ctx context.Context,
3994
        rbfCloser *chancloser.RbfChanCloser) error {
×
3995

×
3996
        coopCloseStates := rbfCloser.RegisterStateEvents()
×
3997
        newStateChan := coopCloseStates.NewItemCreated.ChanOut()
×
3998
        defer rbfCloser.RemoveStateSub(coopCloseStates)
×
3999

×
4000
        isTerminalState := func(newState chancloser.RbfState) bool {
×
4001
                // If we're not in the negotiation sub-state, then we aren't at
×
4002
                // the terminal state yet.
×
4003
                state, ok := newState.(*chancloser.ClosingNegotiation)
×
4004
                if !ok {
×
4005
                        return false
×
4006
                }
×
4007

4008
                localState := state.PeerState.GetForParty(lntypes.Local)
×
4009

×
4010
                // If this isn't the close pending state, we aren't at the
×
4011
                // terminal state yet.
×
4012
                _, ok = localState.(*chancloser.ClosePending)
×
4013

×
4014
                return ok
×
4015
        }
4016

4017
        // Before we enter the subscription loop below, check to see if we're
4018
        // already in the terminal state.
4019
        rbfState, err := rbfCloser.CurrentState()
×
4020
        if err != nil {
×
4021
                return err
×
4022
        }
×
4023
        if isTerminalState(rbfState) {
×
4024
                return nil
×
4025
        }
×
4026

4027
        peerLog.Debugf("Waiting for RBF iteration to complete...")
×
4028

×
4029
        for {
×
4030
                select {
×
4031
                case newState := <-newStateChan:
×
4032
                        if isTerminalState(newState) {
×
4033
                                return nil
×
4034
                        }
×
4035

4036
                case <-ctx.Done():
×
4037
                        return fmt.Errorf("context canceled")
×
4038
                }
4039
        }
4040
}
4041

4042
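
// Gating sketch (editorial addition): a caller bumps the fee only once the
// prior iteration has settled, exactly as the ClosingNegotiation branch of
// startRbfChanCloser below does:
//
//	if err := waitUntilRbfCoastClear(ctx, rbfCloser); err != nil {
//		return err // Context canceled before reaching ClosePending.
//	}
//	rbfCloser.SendEvent(ctx, &chancloser.SendOfferEvent{
//		TargetFeeRate: feeRate,
//	})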

// startRbfChanCloser kicks off the co-op close process using the new RBF based
// co-op close protocol. This is called when we're the one that's initiating
// the cooperative channel close.
//
// TODO(roasbeef): just accept the two shutdown pointer params instead??
func (p *Brontide) startRbfChanCloser(shutdown shutdownInit,
        chanPoint wire.OutPoint) error {

        // Unlike the old negotiate chan closer, we'll always create the RBF
        // chan closer on startup, so we can skip init here.
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
        chanCloser, found := p.activeChanCloses.Load(chanID)
        if !found {
                return fmt.Errorf("rbf chan closer not found for channel %v",
                        chanPoint)
        }

        defaultFeePerKw, err := shutdownStartFeeRate(
                shutdown,
        ).UnwrapOrFuncErr(func() (chainfee.SatPerKWeight, error) {
                return p.cfg.FeeEstimator.EstimateFeePerKW(
                        p.cfg.CoopCloseTargetConfs,
                )
        })
        if err != nil {
                return fmt.Errorf("unable to estimate fee: %w", err)
        }

        chanCloser.WhenRight(func(rbfCloser *chancloser.RbfChanCloser) {
                peerLog.Infof("ChannelPoint(%v): rbf-coop close requested, "+
                        "sending shutdown", chanPoint)

                rbfState, err := rbfCloser.CurrentState()
                if err != nil {
                        peerLog.Warnf("ChannelPoint(%v): unable to get "+
                                "current state for rbf-coop close: %v",
                                chanPoint, err)

                        return
                }

                coopCloseStates := rbfCloser.RegisterStateEvents()

                // Before we send our event below, we'll launch a goroutine to
                // watch for the final terminal state to send updates to the RPC
                // client. We only need to do this if there's an RPC caller.
                var rpcShutdown bool
                whenRPCShutdown(shutdown, func(req *htlcswitch.ChanClose) {
                        rpcShutdown = true

                        p.cg.WgAdd(1)
                        go func() {
                                defer p.cg.WgDone()

                                p.observeRbfCloseUpdates(
                                        rbfCloser, req, coopCloseStates,
                                )
                        }()
                })

                if !rpcShutdown {
                        defer rbfCloser.RemoveStateSub(coopCloseStates)
                }

                ctx, _ := p.cg.Create(context.Background())
                feeRate := defaultFeePerKw.FeePerVByte()

                // Depending on the state of the state machine, we'll either
                // kick things off by sending shutdown, or attempt to send a new
                // offer to the remote party.
                switch rbfState.(type) {
                // The channel is still active, so we'll now kick off the co-op
                // close process by instructing it to send a shutdown message to
                // the remote party.
                case *chancloser.ChannelActive:
                        rbfCloser.SendEvent(
                                context.Background(),
                                &chancloser.SendShutdown{
                                        IdealFeeRate: feeRate,
                                        DeliveryAddr: shutdownStartAddr(
                                                shutdown,
                                        ),
                                },
                        )

                // If we haven't yet sent an offer (didn't have enough funds at
                // the prior fee rate), or we've sent an offer, then we'll
                // trigger a new offer event.
                case *chancloser.ClosingNegotiation:
                        // Before we send the event below, we'll wait until
                        // we're in a semi-terminal state.
                        err := waitUntilRbfCoastClear(ctx, rbfCloser)
                        if err != nil {
                                peerLog.Warnf("ChannelPoint(%v): unable to "+
                                        "wait for coast to clear: %v",
                                        chanPoint, err)

                                return
                        }

                        event := chancloser.ProtocolEvent(
                                &chancloser.SendOfferEvent{
                                        TargetFeeRate: feeRate,
                                },
                        )
                        rbfCloser.SendEvent(ctx, event)

                default:
                        peerLog.Warnf("ChannelPoint(%v): unexpected state "+
                                "for rbf-coop close: %T", chanPoint, rbfState)
                }
        })

        return nil
}
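
// Entry-point sketch, mirroring the call in handleLocalCloseReq below:
//
//	err := p.startRbfChanCloser(
//		newRPCShutdownInit(req), channel.ChannelPoint(),
//	)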

// handleLocalCloseReq kicks off the workflow to execute a cooperative or
// forced unilateral closure of the channel initiated by a local subsystem.
func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
        chanID := lnwire.NewChanIDFromOutPoint(*req.ChanPoint)

        channel, ok := p.activeChannels.Load(chanID)

        // Though this function can't be called for pending channels, we still
        // check whether the channel is nil for safety.
        if !ok || channel == nil {
                err := fmt.Errorf("unable to close channel, ChannelID(%v) is "+
                        "unknown", chanID)
                p.log.Errorf(err.Error())
                req.Err <- err
                return
        }

        isTaprootChan := channel.ChanType().IsTaproot()

        switch req.CloseType {
        // A type of CloseRegular indicates that the user has opted to close
        // out this channel on-chain, so we execute the cooperative channel
        // closure workflow.
        case contractcourt.CloseRegular:
                var err error
                switch {
                // If this is the RBF coop state machine, then we'll instruct
                // it to send the shutdown message. This also might be an RBF
                // iteration, in which case we'll be obtaining a new
                // transaction w/ a higher fee rate.
                //
                // We don't support this close type for taproot channels yet
                // however.
                case !isTaprootChan && p.rbfCoopCloseAllowed():
                        err = p.startRbfChanCloser(
                                newRPCShutdownInit(req), channel.ChannelPoint(),
                        )
                default:
                        err = p.initNegotiateChanCloser(req, channel)
                }

                if err != nil {
                        p.log.Errorf(err.Error())
                        req.Err <- err
                }

        // A type of CloseBreach indicates that the counterparty has breached
        // the channel, therefore we need to clean up our local state.
        case contractcourt.CloseBreach:
                // TODO(roasbeef): no longer need with newer breach logic?
                p.log.Infof("ChannelPoint(%v) has been breached, wiping "+
                        "channel", req.ChanPoint)
                p.WipeChannel(req.ChanPoint)
        }
}
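
// Hypothetical requester-side sketch (editorial addition). The field names
// match the usages of htlcswitch.ChanClose seen in this file; the
// authoritative struct, including the exact types of Updates/Err/Ctx, is
// defined in the htlcswitch package, and the dispatch channel named here is
// assumed for illustration only:
//
//	req := &htlcswitch.ChanClose{
//		CloseType:      contractcourt.CloseRegular,
//		ChanPoint:      &chanPoint,
//		TargetFeePerKw: feeRate,
//		Err:            make(chan error, 1),
//	}
//	p.localCloseChanReqs <- req // Hypothetical dispatch channel.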

// linkFailureReport is sent to the channelManager whenever a link reports a
// link failure, and is forced to exit. The report houses the necessary
// information to clean up the channel state, send back the error message, and
// force close if necessary.
type linkFailureReport struct {
        chanPoint   wire.OutPoint
        chanID      lnwire.ChannelID
        shortChanID lnwire.ShortChannelID
        linkErr     htlcswitch.LinkFailureError
}

// handleLinkFailure processes a link failure report when a link in the switch
// fails. It facilitates the removal of all channel state within the peer,
// force closing the channel depending on severity, and sending the error
// message back to the remote party.
func (p *Brontide) handleLinkFailure(failure linkFailureReport) {
        // Retrieve the channel from the map of active channels. We do this to
        // have access to it even after WipeChannel removes it from the map.
        chanID := lnwire.NewChanIDFromOutPoint(failure.chanPoint)
        lnChan, _ := p.activeChannels.Load(chanID)

        // We begin by wiping the link, which will remove it from the switch,
        // such that it won't be attempted to be used for any more updates.
        //
        // TODO(halseth): should introduce a way to atomically stop/pause the
        // link and cancel back any adds in its mailboxes such that we can
        // safely force close without the link being added again and updates
        // being applied.
        p.WipeChannel(&failure.chanPoint)

        // If the error encountered was severe enough, we'll now force close
        // the channel to prevent re-adding it to the switch in the future.
        if failure.linkErr.FailureAction == htlcswitch.LinkFailureForceClose {
                p.log.Warnf("Force closing link(%v)", failure.shortChanID)

                closeTx, err := p.cfg.ChainArb.ForceCloseContract(
                        failure.chanPoint,
                )
                if err != nil {
                        p.log.Errorf("unable to force close "+
                                "link(%v): %v", failure.shortChanID, err)
                } else {
                        p.log.Infof("channel(%v) force "+
                                "closed with txid %v",
                                failure.shortChanID, closeTx.TxHash())
                }
        }

        // If this is a permanent failure, we will mark the channel borked.
        if failure.linkErr.PermanentFailure && lnChan != nil {
                p.log.Warnf("Marking link(%v) borked due to permanent "+
                        "failure", failure.shortChanID)

                if err := lnChan.State().MarkBorked(); err != nil {
                        p.log.Errorf("Unable to mark channel %v borked: %v",
                                failure.shortChanID, err)
                }
        }

        // Send an error to the peer, explaining why we failed the channel.
        if failure.linkErr.ShouldSendToPeer() {
                // If SendData is set, send it to the peer. If not, we'll use
                // the standard error messages in the payload. We only include
                // sendData in the cases where the error data does not contain
                // sensitive information.
                data := []byte(failure.linkErr.Error())
                if failure.linkErr.SendData != nil {
                        data = failure.linkErr.SendData
                }

                var networkMsg lnwire.Message
                if failure.linkErr.Warning {
                        networkMsg = &lnwire.Warning{
                                ChanID: failure.chanID,
                                Data:   data,
                        }
                } else {
                        networkMsg = &lnwire.Error{
                                ChanID: failure.chanID,
                                Data:   data,
                        }
                }

                err := p.SendMessage(true, networkMsg)
                if err != nil {
                        p.log.Errorf("unable to send msg to "+
                                "remote peer: %v", err)
                }
        }

        // If the failure action is disconnect, then we'll execute that now. If
        // we had to send an error above, it was a sync call, so we expect the
        // message to be flushed on the wire by now.
        if failure.linkErr.FailureAction == htlcswitch.LinkFailureDisconnect {
                p.Disconnect(fmt.Errorf("link requested disconnect"))
        }
}

// fetchLinkFromKeyAndCid fetches a link from the switch via the remote's
// public key and the channel id.
func (p *Brontide) fetchLinkFromKeyAndCid(
        cid lnwire.ChannelID) htlcswitch.ChannelUpdateHandler {

        var chanLink htlcswitch.ChannelUpdateHandler

        // We don't need to check the error here, and can instead just loop
        // over the slice and return nil.
        links, _ := p.cfg.Switch.GetLinksByInterface(p.cfg.PubKeyBytes)
        for _, link := range links {
                if link.ChanID() == cid {
                        chanLink = link
                        break
                }
        }

        return chanLink
}

// finalizeChanClosure performs the final clean up steps once the cooperative
// closure transaction has been fully broadcast. The finalized closing state
// machine should be passed in. Once the transaction has been sufficiently
// confirmed, the channel will be marked as fully closed within the database,
// and any clients will be notified of updates to the closing state.
func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
        closeReq := chanCloser.CloseRequest()

        // First, we'll clear all indexes related to the channel in question.
        chanPoint := chanCloser.Channel().ChannelPoint()
        p.WipeChannel(&chanPoint)

        // Also clear the activeChanCloses map of this channel.
        cid := lnwire.NewChanIDFromOutPoint(chanPoint)
        p.activeChanCloses.Delete(cid) // TODO(roasbeef): existing race

        // Next, we'll launch a goroutine which will request to be notified by
        // the ChainNotifier once the closure transaction obtains a single
        // confirmation.
        notifier := p.cfg.ChainNotifier

        // If any error happens during waitForChanToClose, forward it to
        // closeReq. If this channel closure is not locally initiated, closeReq
        // will be nil, so just ignore the error.
        errChan := make(chan error, 1)
        if closeReq != nil {
                errChan = closeReq.Err
        }

        closingTx, err := chanCloser.ClosingTx()
        if err != nil {
                if closeReq != nil {
                        p.log.Error(err)
                        closeReq.Err <- err
                }
        }

        closingTxid := closingTx.TxHash()

        // If this is a locally requested shutdown, update the caller with a
        // new event detailing the current pending state of this request.
        if closeReq != nil {
                closeReq.Updates <- &PendingUpdate{
                        Txid: closingTxid[:],
                }
        }

        localOut := chanCloser.LocalCloseOutput()
        remoteOut := chanCloser.RemoteCloseOutput()
        auxOut := chanCloser.AuxOutputs()
        go WaitForChanToClose(
                chanCloser.NegotiationHeight(), notifier, errChan,
                &chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() {
                        // Respond to the local subsystem which requested the
                        // channel closure.
                        if closeReq != nil {
                                closeReq.Updates <- &ChannelCloseUpdate{
                                        ClosingTxid:       closingTxid[:],
                                        Success:           true,
                                        LocalCloseOutput:  localOut,
                                        RemoteCloseOutput: remoteOut,
                                        AuxOutputs:        auxOut,
                                }
                        }
                },
        )
}

// WaitForChanToClose uses the passed notifier to wait until the channel has
// been detected as closed on chain and then concludes by executing the
// following actions: the channel point will be sent over the settleChan, and
// finally the callback will be executed. If any error is encountered within
// the function, then it will be sent over the errChan.
func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
        errChan chan error, chanPoint *wire.OutPoint,
        closingTxID *chainhash.Hash, closeScript []byte, cb func()) {

        peerLog.Infof("Waiting for confirmation of close of ChannelPoint(%v) "+
                "with txid: %v", chanPoint, closingTxID)

        // TODO(roasbeef): add param for num needed confs
        confNtfn, err := notifier.RegisterConfirmationsNtfn(
                closingTxID, closeScript, 1, bestHeight,
        )
        if err != nil {
                if errChan != nil {
                        errChan <- err
                }
                return
        }

        // In the case that the ChainNotifier is shutting down, all subscriber
        // notification channels will be closed, generating a nil receive.
        height, ok := <-confNtfn.Confirmed
        if !ok {
                return
        }

        // The channel has been closed, remove it from any active indexes, and
        // the database state.
        peerLog.Infof("ChannelPoint(%v) is now closed at "+
                "height %v", chanPoint, height.BlockHeight)

        // Finally, execute the closure call back to mark the confirmation of
        // the transaction closing the contract.
        cb()
}

// WipeChannel removes the passed channel point from all indexes associated with
// the peer and the switch.
func (p *Brontide) WipeChannel(chanPoint *wire.OutPoint) {
        chanID := lnwire.NewChanIDFromOutPoint(*chanPoint)

        p.activeChannels.Delete(chanID)

        // Instruct the HtlcSwitch to close this link as the channel is no
        // longer active.
        p.cfg.Switch.RemoveLink(chanID)
}

// handleInitMsg handles the incoming init message which contains global and
// local feature vectors. If feature vectors are incompatible then disconnect.
func (p *Brontide) handleInitMsg(msg *lnwire.Init) error {
        // First, merge any features from the legacy global features field into
        // those presented in the local features fields.
        err := msg.Features.Merge(msg.GlobalFeatures)
        if err != nil {
                return fmt.Errorf("unable to merge legacy global features: %w",
                        err)
        }

        // Then, finalize the remote feature vector providing the flattened
        // feature bit namespace.
        p.remoteFeatures = lnwire.NewFeatureVector(
                msg.Features, lnwire.Features,
        )

        // Now that we have their features loaded, we'll ensure that they
        // didn't set any required bits that we don't know of.
        err = feature.ValidateRequired(p.remoteFeatures)
        if err != nil {
                return fmt.Errorf("invalid remote features: %w", err)
        }

        // Ensure the remote party's feature vector contains all transitive
        // dependencies. We know ours are correct since they are validated
        // during the feature manager's instantiation.
        err = feature.ValidateDeps(p.remoteFeatures)
        if err != nil {
                return fmt.Errorf("invalid remote features: %w", err)
        }

        // Now that we know we understand their requirements, we'll check to
        // see if they don't support anything that we deem to be mandatory.
        if !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired) {
                return fmt.Errorf("data loss protection required")
        }

        return nil
}

// LocalFeatures returns the set of global features that has been advertised by
// the local node. This allows sub-systems that use this interface to gate their
// behavior off the set of negotiated feature bits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) LocalFeatures() *lnwire.FeatureVector {
        return p.cfg.Features
}

// RemoteFeatures returns the set of global features that has been advertised by
// the remote node. This allows sub-systems that use this interface to gate
// their behavior off the set of negotiated feature bits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) RemoteFeatures() *lnwire.FeatureVector {
        return p.remoteFeatures
}

// hasNegotiatedScidAlias returns true if we've negotiated the
// option-scid-alias feature bit with the peer.
func (p *Brontide) hasNegotiatedScidAlias() bool {
        peerHas := p.remoteFeatures.HasFeature(lnwire.ScidAliasOptional)
        localHas := p.cfg.Features.HasFeature(lnwire.ScidAliasOptional)
        return peerHas && localHas
}
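
// The same "both sides advertise the bit" pattern generalizes to any optional
// feature; a sketch (editorial addition, not in the upstream file):
//
//	func (p *Brontide) hasNegotiated(bit lnwire.FeatureBit) bool {
//		return p.remoteFeatures.HasFeature(bit) &&
//			p.cfg.Features.HasFeature(bit)
//	}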

// sendInitMsg sends the Init message to the remote peer. This message contains
// our currently supported local and global features.
func (p *Brontide) sendInitMsg(legacyChan bool) error {
        features := p.cfg.Features.Clone()
        legacyFeatures := p.cfg.LegacyFeatures.Clone()

        // If we have a legacy channel open with a peer, we downgrade static
        // remote required to optional in case the peer does not understand the
        // required feature bit. If we do not do this, the peer will reject our
        // connection because it does not understand a required feature bit, and
        // our channel will be unusable.
        if legacyChan && features.RequiresFeature(lnwire.StaticRemoteKeyRequired) {
                p.log.Infof("Legacy channel open with peer, " +
                        "downgrading static remote required feature bit to " +
                        "optional")

                // Unset and set in both the local and global features to
                // ensure both sets are consistent and mergeable by old and
                // new nodes.
                features.Unset(lnwire.StaticRemoteKeyRequired)
                legacyFeatures.Unset(lnwire.StaticRemoteKeyRequired)

                features.Set(lnwire.StaticRemoteKeyOptional)
                legacyFeatures.Set(lnwire.StaticRemoteKeyOptional)
        }

        msg := lnwire.NewInitMessage(
                legacyFeatures.RawFeatureVector,
                features.RawFeatureVector,
        )

        return p.writeMessage(msg)
}

// resendChanSyncMsg will attempt to find a channel sync message for the closed
// channel and resend it to our peer.
func (p *Brontide) resendChanSyncMsg(cid lnwire.ChannelID) error {
        // If we already re-sent the message for this channel, we won't do it
        // again.
        if _, ok := p.resentChanSyncMsg[cid]; ok {
                return nil
        }

        // Check if we have any channel sync messages stored for this channel.
        c, err := p.cfg.ChannelDB.FetchClosedChannelForID(cid)
        if err != nil {
                return fmt.Errorf("unable to fetch channel sync messages for "+
                        "peer %v: %v", p, err)
        }

        if c.LastChanSyncMsg == nil {
                return fmt.Errorf("no chan sync message stored for channel %v",
                        cid)
        }

        if !c.RemotePub.IsEqual(p.IdentityKey()) {
                return fmt.Errorf("ignoring channel reestablish from "+
                        "peer=%x", p.IdentityKey().SerializeCompressed())
        }

        p.log.Debugf("Re-sending channel sync message for channel %v to "+
                "peer", cid)

        if err := p.SendMessage(true, c.LastChanSyncMsg); err != nil {
                return fmt.Errorf("failed resending channel sync "+
                        "message to peer %v: %v", p, err)
        }

        p.log.Debugf("Re-sent channel sync message for channel %v to peer",
                cid)

        // Note down that we sent the message, so we won't resend it again for
        // this connection.
        p.resentChanSyncMsg[cid] = struct{}{}

        return nil
}

// SendMessage sends a variadic number of high-priority messages to the remote
// peer. The first argument denotes if the method should block until the
// messages have been sent to the remote peer or an error is returned,
// otherwise it returns immediately after queueing.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) SendMessage(sync bool, msgs ...lnwire.Message) error {
        return p.sendMessage(sync, true, msgs...)
}

// SendMessageLazy sends a variadic number of low-priority messages to the
// remote peer. The first argument denotes if the method should block until
// the messages have been sent to the remote peer or an error is returned,
// otherwise it returns immediately after queueing.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) SendMessageLazy(sync bool, msgs ...lnwire.Message) error {
        return p.sendMessage(sync, false, msgs...)
}

// sendMessage queues a variadic number of messages using the passed priority
// to the remote peer. If sync is true, this method will block until the
// messages have been sent to the remote peer or an error is returned, otherwise
// it returns immediately after queueing.
func (p *Brontide) sendMessage(sync, priority bool, msgs ...lnwire.Message) error {
        // Add all incoming messages to the outgoing queue. A list of error
        // chans is populated for each message if the caller requested a sync
        // send.
        var errChans []chan error
        if sync {
                errChans = make([]chan error, 0, len(msgs))
        }
        for _, msg := range msgs {
                // If a sync send was requested, create an error chan to listen
                // for an ack from the writeHandler.
                var errChan chan error
                if sync {
                        errChan = make(chan error, 1)
                        errChans = append(errChans, errChan)
                }

                if priority {
                        p.queueMsg(msg, errChan)
                } else {
                        p.queueMsgLazy(msg, errChan)
                }
        }

        // Wait for all replies from the writeHandler. For async sends, this
        // will be a NOP as the list of error chans is nil.
        for _, errChan := range errChans {
                select {
                case err := <-errChan:
                        return err
                case <-p.cg.Done():
                        return lnpeer.ErrPeerExiting
                case <-p.cfg.Quit:
                        return lnpeer.ErrPeerExiting
                }
        }

        return nil
}
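
// Usage sketch (editorial addition): a blocking high-priority send versus a
// fire-and-forget low-priority send, via SendMessage/SendMessageLazy above:
//
//	// Blocks until the writeHandler acks (or the peer exits).
//	if err := p.SendMessage(true, msg); err != nil {
//		return err
//	}
//
//	// Queues on the lazy path and returns immediately.
//	_ = p.SendMessageLazy(false, msg)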

// PubKey returns the pubkey of the peer in compressed serialized format.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) PubKey() [33]byte {
        return p.cfg.PubKeyBytes
}

// IdentityKey returns the public key of the remote peer.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) IdentityKey() *btcec.PublicKey {
        return p.cfg.Addr.IdentityKey
}

// Address returns the network address of the remote peer.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) Address() net.Addr {
        return p.cfg.Addr.Address
}

// AddNewChannel adds a new channel to the peer. The channel should fail to be
// added if the cancel channel is closed.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) AddNewChannel(newChan *lnpeer.NewChannel,
        cancel <-chan struct{}) error {

        errChan := make(chan error, 1)
        newChanMsg := &newChannelMsg{
                channel: newChan,
                err:     errChan,
        }

        select {
        case p.newActiveChannel <- newChanMsg:
        case <-cancel:
                return errors.New("canceled adding new channel")
        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }

        // We pause here to wait for the peer to recognize the new channel
        // before we close the channel barrier corresponding to the channel.
        select {
        case err := <-errChan:
                return err
        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }
}

// AddPendingChannel adds a pending open channel to the peer. The channel
// should fail to be added if the cancel channel is closed.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) AddPendingChannel(cid lnwire.ChannelID,
        cancel <-chan struct{}) error {

        errChan := make(chan error, 1)
        newChanMsg := &newChannelMsg{
                channelID: cid,
                err:       errChan,
        }

        select {
        case p.newPendingChannel <- newChanMsg:

        case <-cancel:
                return errors.New("canceled adding pending channel")

        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }

        // We pause here to wait for the peer to recognize the new pending
        // channel before we close the channel barrier corresponding to the
        // channel.
        select {
        case err := <-errChan:
                return err

        case <-cancel:
                return errors.New("canceled adding pending channel")

        case <-p.cg.Done():
                return lnpeer.ErrPeerExiting
        }
}
// RemovePendingChannel removes a pending open channel from the peer.
4752
//
4753
// NOTE: Part of the lnpeer.Peer interface.
4754
func (p *Brontide) RemovePendingChannel(cid lnwire.ChannelID) error {
×
4755
        errChan := make(chan error, 1)
×
4756
        newChanMsg := &newChannelMsg{
×
4757
                channelID: cid,
×
4758
                err:       errChan,
×
4759
        }
×
4760

×
4761
        select {
×
4762
        case p.removePendingChannel <- newChanMsg:
×
4763
        case <-p.cg.Done():
×
4764
                return lnpeer.ErrPeerExiting
×
4765
        }
4766

4767
        // We pause here to wait for the peer to respond to the cancellation of
4768
        // the pending channel before we close the channel barrier
4769
        // corresponding to the channel.
4770
        select {
×
4771
        case err := <-errChan:
×
4772
                return err
×
4773

4774
        case <-p.cg.Done():
×
4775
                return lnpeer.ErrPeerExiting
×
4776
        }
4777
}
4778
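
// The helper below is an editor's sketch, not part of the original file. It
// shows the cancel-channel contract shared by AddNewChannel and
// AddPendingChannel: the caller supplies a quit channel so the request can
// be abandoned if the caller's own flow dies first.
func exampleAddPending(p *Brontide, cid lnwire.ChannelID) error {
        quit := make(chan struct{})
        defer close(quit)

        // Blocks until the channelManager records the pending channel, the
        // quit channel fires, or the peer exits.
        return p.AddPendingChannel(cid, quit)
}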

// StartTime returns the time at which the connection was established if the
// peer started successfully, and zero otherwise.
func (p *Brontide) StartTime() time.Time {
        return p.startTime
}

// handleCloseMsg is called when a new cooperative channel closure related
// message is received from the remote peer. We'll use this message to advance
// the chan closer state machine.
func (p *Brontide) handleCloseMsg(msg *closeMsg) {
        link := p.fetchLinkFromKeyAndCid(msg.cid)

        // We'll now fetch the matching closing state machine in order to
        // continue, or finalize the channel closure process.
        chanCloserE, err := p.fetchActiveChanCloser(msg.cid)
        if err != nil {
                // If the channel is not known to us, we'll simply ignore this
                // message.
                if err == ErrChannelNotFound {
                        return
                }

                p.log.Errorf("Unable to respond to remote close msg: %v", err)

                errMsg := &lnwire.Error{
                        ChanID: msg.cid,
                        Data:   lnwire.ErrorData(err.Error()),
                }
                p.queueMsg(errMsg, nil)
                return
        }

        if chanCloserE.IsRight() {
                // TODO(roasbeef): assert?
                return
        }

        // At this point, we'll only enter this call path if a negotiate chan
        // closer was used. So we'll extract that from the either now.
        //
        // TODO(roasbeef): need extra helper func for either to make cleaner
        var chanCloser *chancloser.ChanCloser
        chanCloserE.WhenLeft(func(c *chancloser.ChanCloser) {
                chanCloser = c
        })

        handleErr := func(err error) {
                err = fmt.Errorf("unable to process close msg: %w", err)
                p.log.Error(err)

                // As the negotiations failed, we'll reset the channel state
                // machine to ensure we act on on-chain events as normal.
                chanCloser.Channel().ResetState()
                if chanCloser.CloseRequest() != nil {
                        chanCloser.CloseRequest().Err <- err
                }

                p.activeChanCloses.Delete(msg.cid)

                p.Disconnect(err)
        }

        // Next, we'll process the next message using the target state machine.
        // We'll either continue negotiation, or halt.
        switch typed := msg.msg.(type) {
        case *lnwire.Shutdown:
                // Disable incoming adds immediately.
                if link != nil && !link.DisableAdds(htlcswitch.Incoming) {
                        p.log.Warnf("Incoming link adds already disabled: %v",
                                link.ChanID())
                }

                oShutdown, err := chanCloser.ReceiveShutdown(*typed)
                if err != nil {
                        handleErr(err)
                        return
                }

                oShutdown.WhenSome(func(msg lnwire.Shutdown) {
                        // If the link is nil it means we can immediately queue
                        // the Shutdown message since we don't have to wait for
                        // commitment transaction synchronization.
                        if link == nil {
                                p.queueMsg(&msg, nil)
                                return
                        }

                        // Immediately disallow any new HTLC's from being added
                        // in the outgoing direction.
                        if !link.DisableAdds(htlcswitch.Outgoing) {
                                p.log.Warnf("Outgoing link adds already "+
                                        "disabled: %v", link.ChanID())
                        }

                        // When we have a Shutdown to send, we defer it till the
                        // next time we send a CommitSig to remain spec
                        // compliant.
                        link.OnCommitOnce(htlcswitch.Outgoing, func() {
                                p.queueMsg(&msg, nil)
                        })
                })

                beginNegotiation := func() {
                        oClosingSigned, err := chanCloser.BeginNegotiation()
                        if err != nil {
                                handleErr(err)
                                return
                        }

                        oClosingSigned.WhenSome(func(msg lnwire.ClosingSigned) {
                                p.queueMsg(&msg, nil)
                        })
                }

                if link == nil {
                        beginNegotiation()
                } else {
                        // Now we register a flush hook to advance the
                        // ChanCloser and possibly send out a ClosingSigned
                        // when the link finishes draining.
                        link.OnFlushedOnce(func() {
                                // Remove link in goroutine to prevent deadlock.
                                go p.cfg.Switch.RemoveLink(msg.cid)
                                beginNegotiation()
                        })
                }

        case *lnwire.ClosingSigned:
                oClosingSigned, err := chanCloser.ReceiveClosingSigned(*typed)
                if err != nil {
                        handleErr(err)
                        return
                }

                oClosingSigned.WhenSome(func(msg lnwire.ClosingSigned) {
                        p.queueMsg(&msg, nil)
                })

        default:
                panic("impossible closeMsg type")
        }

        // If we haven't finished close negotiations, then we'll continue as we
        // can't yet finalize the closure.
        if _, err := chanCloser.ClosingTx(); err != nil {
                return
        }

        // Otherwise, we've agreed on a closing fee! In this case, we'll wrap up
        // the channel closure by notifying relevant sub-systems and launching a
        // goroutine to wait for close tx conf.
        p.finalizeChanClosure(chanCloser)
}

// HandleLocalCloseChanReqs accepts a *htlcswitch.ChanClose and passes it onto
// the channelManager goroutine, which will shut down the link and possibly
// close the channel.
func (p *Brontide) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) {
        select {
        case p.localCloseChanReqs <- req:
                p.log.Info("Local close channel request is going to be " +
                        "delivered to the peer")
        case <-p.cg.Done():
                p.log.Info("Unable to deliver local close channel request " +
                        "to peer")
        }
}
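
// The helper below is an editor's sketch, not part of the original file. It
// shows how a caller might assemble the htlcswitch.ChanClose request that
// HandleLocalCloseChanReqs expects; the field set mirrors the request built
// in TriggerCoopCloseRbfBump further down.
func exampleLocalClose(ctx context.Context, p *Brontide,
        chanPoint wire.OutPoint,
        feeRate chainfee.SatPerKWeight) (chan interface{}, chan error) {

        updates := make(chan interface{}, 1)
        errChan := make(chan error, 1)

        p.HandleLocalCloseChanReqs(&htlcswitch.ChanClose{
                CloseType:      contractcourt.CloseRegular,
                ChanPoint:      &chanPoint,
                TargetFeePerKw: feeRate,
                Updates:        updates,
                Err:            errChan,
                Ctx:            ctx,
        })

        return updates, errChan
}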

// NetAddress returns the network of the remote peer as an lnwire.NetAddress.
func (p *Brontide) NetAddress() *lnwire.NetAddress {
        return p.cfg.Addr
}

// Inbound is a getter for the Brontide's Inbound boolean in cfg.
func (p *Brontide) Inbound() bool {
        return p.cfg.Inbound
}

// ConnReq is a getter for the Brontide's connReq in cfg.
func (p *Brontide) ConnReq() *connmgr.ConnReq {
        return p.cfg.ConnReq
}

// ErrorBuffer is a getter for the Brontide's errorBuffer in cfg.
func (p *Brontide) ErrorBuffer() *queue.CircularBuffer {
        return p.cfg.ErrorBuffer
}

// SetAddress sets the remote peer's address given an address.
func (p *Brontide) SetAddress(address net.Addr) {
        p.cfg.Addr.Address = address
}

// ActiveSignal returns the peer's active signal.
func (p *Brontide) ActiveSignal() chan struct{} {
        return p.activeSignal
}

// Conn returns a pointer to the peer's connection struct.
func (p *Brontide) Conn() net.Conn {
        return p.cfg.Conn
}

// BytesReceived returns the number of bytes received from the peer.
func (p *Brontide) BytesReceived() uint64 {
        return atomic.LoadUint64(&p.bytesReceived)
}

// BytesSent returns the number of bytes sent to the peer.
func (p *Brontide) BytesSent() uint64 {
        return atomic.LoadUint64(&p.bytesSent)
}

// LastRemotePingPayload returns the last payload the remote party sent as part
// of their ping.
func (p *Brontide) LastRemotePingPayload() []byte {
        pingPayload := p.lastPingPayload.Load()
        if pingPayload == nil {
                return []byte{}
        }

        pingBytes, ok := pingPayload.(lnwire.PingPayload)
        if !ok {
                return nil
        }

        return pingBytes
}

// attachChannelEventSubscription creates a channel event subscription and
// attaches the client to the Brontide if the reenable timeout is no greater
// than 1 minute.
func (p *Brontide) attachChannelEventSubscription() error {
        // If the timeout is greater than 1 minute, it's unlikely that the link
        // hasn't yet finished its reestablishment. Return nil without creating
        // the client to indicate that we don't want to retry.
        if p.cfg.ChanActiveTimeout > 1*time.Minute {
                return nil
        }

        // When the reenable timeout is less than 1 minute, it's likely the
        // channel link hasn't finished its reestablishment yet. In that case,
        // we'll give it a second chance by subscribing to the channel update
        // events. Upon receiving the `ActiveLinkEvent`, we'll then request
        // enabling the channel again.
        sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents()
        if err != nil {
                return fmt.Errorf("SubscribeChannelEvents failed: %w", err)
        }

        p.channelEventClient = sub

        return nil
}

// updateNextRevocation updates the existing channel's next revocation if it's
// nil.
func (p *Brontide) updateNextRevocation(c *channeldb.OpenChannel) error {
        chanPoint := c.FundingOutpoint
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

        // Read the current channel.
        currentChan, loaded := p.activeChannels.Load(chanID)

        // currentChan should exist, but we perform a check anyway to avoid nil
        // pointer dereference.
        if !loaded {
                return fmt.Errorf("missing active channel with chanID=%v",
                        chanID)
        }

        // currentChan should not be nil, but we perform a check anyway to
        // avoid nil pointer dereference.
        if currentChan == nil {
                return fmt.Errorf("found nil active channel with chanID=%v",
                        chanID)
        }

        // If we're being sent a new channel, and our existing channel doesn't
        // have the next revocation, then we need to update the current
        // existing channel.
        if currentChan.RemoteNextRevocation() != nil {
                return nil
        }

        p.log.Infof("Processing retransmitted ChannelReady for "+
                "ChannelPoint(%v)", chanPoint)

        nextRevoke := c.RemoteNextRevocation

        err := currentChan.InitNextRevocation(nextRevoke)
        if err != nil {
                return fmt.Errorf("unable to init next revocation: %w", err)
        }

        return nil
}

// addActiveChannel adds a new active channel to the `activeChannels` map. It
// takes a `channeldb.OpenChannel`, creates a `lnwallet.LightningChannel` from
// it and assembles it with a channel link.
func (p *Brontide) addActiveChannel(c *lnpeer.NewChannel) error {
        chanPoint := c.FundingOutpoint
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

        // If we've reached this point, there are two possible scenarios. If
        // the channel was in the active channels map as nil, then it was
        // loaded from disk and we need to send reestablish. Else, it was not
        // loaded from disk and we don't need to send reestablish as this is a
        // fresh channel.
        shouldReestablish := p.isLoadedFromDisk(chanID)

        chanOpts := c.ChanOpts
        if shouldReestablish {
                // If we have to do the reestablish dance for this channel,
                // ensure that we don't try to call InitRemoteMusigNonces twice
                // by calling SkipNonceInit.
                chanOpts = append(chanOpts, lnwallet.WithSkipNonceInit())
        }

        p.cfg.AuxLeafStore.WhenSome(func(s lnwallet.AuxLeafStore) {
                chanOpts = append(chanOpts, lnwallet.WithLeafStore(s))
        })
        p.cfg.AuxSigner.WhenSome(func(s lnwallet.AuxSigner) {
                chanOpts = append(chanOpts, lnwallet.WithAuxSigner(s))
        })
        p.cfg.AuxResolver.WhenSome(func(s lnwallet.AuxContractResolver) {
                chanOpts = append(chanOpts, lnwallet.WithAuxResolver(s))
        })

        // If not already active, we'll add this channel to the set of active
        // channels, so we can look it up later easily according to its channel
        // ID.
        lnChan, err := lnwallet.NewLightningChannel(
                p.cfg.Signer, c.OpenChannel, p.cfg.SigPool, chanOpts...,
        )
        if err != nil {
                return fmt.Errorf("unable to create LightningChannel: %w", err)
        }

        // Store the channel in the activeChannels map.
        p.activeChannels.Store(chanID, lnChan)

        p.log.Infof("New channel active ChannelPoint(%v) with peer", chanPoint)

        // Next, we'll assemble a ChannelLink along with the necessary items it
        // needs to function.
        chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(chanPoint)
        if err != nil {
                return fmt.Errorf("unable to subscribe to chain events: %w",
                        err)
        }

        // We'll query the channel DB for the new channel's initial forwarding
        // policies to determine the policy we start out with.
        initialPolicy, err := p.cfg.ChannelDB.GetInitialForwardingPolicy(chanID)
        if err != nil {
                return fmt.Errorf("unable to query for initial forwarding "+
                        "policy: %v", err)
        }

        // Create the link and add it to the switch.
        err = p.addLink(
                &chanPoint, lnChan, initialPolicy, chainEvents,
                shouldReestablish, fn.None[lnwire.Shutdown](),
        )
        if err != nil {
                return fmt.Errorf("can't register new channel link(%v) with "+
                        "peer", chanPoint)
        }

        isTaprootChan := c.ChanType.IsTaproot()

        // We're using the old co-op close, so we don't need to init the new RBF
        // chan closer. If this is a taproot channel, then we'll also fall
        // through, as we don't support this type yet w/ rbf close.
        if !p.rbfCoopCloseAllowed() || isTaprootChan {
                return nil
        }

        // Now that the link has been added above, we'll also init an RBF chan
        // closer for this channel, but only if the new close feature is
        // negotiated.
        //
        // Creating this here ensures that any shutdown messages sent will be
        // automatically routed by the msg router.
        if _, err := p.initRbfChanCloser(lnChan); err != nil {
                p.activeChanCloses.Delete(chanID)

                return fmt.Errorf("unable to init RBF chan closer for new "+
                        "chan: %w", err)
        }

        return nil
}

// handleNewActiveChannel handles a `newChannelMsg` request. Depending on
// whether we know this channel ID or not, we'll either add it to the
// `activeChannels` map or init the next revocation for it.
func (p *Brontide) handleNewActiveChannel(req *newChannelMsg) {
        newChan := req.channel
        chanPoint := newChan.FundingOutpoint
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

        // Only update RemoteNextRevocation if the channel is in the
        // activeChannels map and if we added the link to the switch. Only
        // active channels will be added to the switch.
        if p.isActiveChannel(chanID) {
                p.log.Infof("Already have ChannelPoint(%v), ignoring",
                        chanPoint)

                // Handle it and close the err chan on the request.
                close(req.err)

                // Update the next revocation point.
                err := p.updateNextRevocation(newChan.OpenChannel)
                if err != nil {
                        p.log.Errorf(err.Error())
                }

                return
        }

        // This is a new channel, we now add it to the map.
        if err := p.addActiveChannel(req.channel); err != nil {
                // Log and send back the error to the request.
                p.log.Errorf(err.Error())
                req.err <- err

                return
        }

        // Close the err chan if everything went fine.
        close(req.err)
}

// handleNewPendingChannel takes a `newChannelMsg` request and adds it to the
// `activeChannels` map with a nil value. This pending channel will be saved
// as it may become active in the future. Once active, the funding manager
// will send it again via `AddNewChannel`, and we'd handle the link creation
// there.
func (p *Brontide) handleNewPendingChannel(req *newChannelMsg) {
        defer close(req.err)

        chanID := req.channelID

        // If we already have this channel, something is wrong with the funding
        // flow as it will only be marked as active after `ChannelReady` is
        // handled. In this case, we will do nothing but log an error, just in
        // case this is a legit channel.
        if p.isActiveChannel(chanID) {
                p.log.Errorf("Channel(%v) is already active, ignoring "+
                        "pending channel request", chanID)

                return
        }

        // The channel has already been added, we will do nothing and return.
        if p.isPendingChannel(chanID) {
                p.log.Infof("Channel(%v) is already added, ignoring "+
                        "pending channel request", chanID)

                return
        }

        // This is a new channel, we now add it to the map `activeChannels`
        // with nil value and mark it as a newly added channel in
        // `addedChannels`.
        p.activeChannels.Store(chanID, nil)
        p.addedChannels.Store(chanID, struct{}{})
}

// handleRemovePendingChannel takes a `newChannelMsg` request and removes it
// from the `activeChannels` map. The request will be ignored if the channel
// is considered active by Brontide. Noop if the channel ID cannot be found.
func (p *Brontide) handleRemovePendingChannel(req *newChannelMsg) {
        defer close(req.err)

        chanID := req.channelID

        // If we already have this channel, something is wrong with the funding
        // flow as it will only be marked as active after `ChannelReady` is
        // handled. In this case, we will log an error and exit.
        if p.isActiveChannel(chanID) {
                p.log.Errorf("Channel(%v) is active, ignoring remove request",
                        chanID)
                return
        }

        // The channel has not been added yet, we will log a warning as there
        // is an unexpected call from the funding manager.
        if !p.isPendingChannel(chanID) {
                p.log.Warnf("Channel(%v) not found, removing it anyway", chanID)
        }

        // Remove the record of this pending channel.
        p.activeChannels.Delete(chanID)
        p.addedChannels.Delete(chanID)
}

// sendLinkUpdateMsg sends a message that updates the channel to the
// channel's message stream.
func (p *Brontide) sendLinkUpdateMsg(cid lnwire.ChannelID, msg lnwire.Message) {
        p.log.Tracef("Sending link update msg=%v", msg.MsgType())

        chanStream, ok := p.activeMsgStreams[cid]
        if !ok {
                // If a stream hasn't yet been created, then we'll do so, add
                // it to the map, and finally start it.
                chanStream = newChanMsgStream(p, cid)
                p.activeMsgStreams[cid] = chanStream
                chanStream.Start()

                // Stop the stream when quit.
                go func() {
                        <-p.cg.Done()
                        chanStream.Stop()
                }()
        }

        // With the stream obtained, add the message to the stream so we can
        // continue processing messages.
        chanStream.AddMsg(msg)
}

// scaleTimeout multiplies the argument duration by a constant factor
// depending on various heuristics. Currently this is only used to check
// whether our peer appears to be connected over Tor and, if so, to relax the
// timeout deadline. However, this is subject to change and should be treated
// as opaque.
func (p *Brontide) scaleTimeout(timeout time.Duration) time.Duration {
        if p.isTorConnection {
                return timeout * time.Duration(torTimeoutMultiplier)
        }

        return timeout
}
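
// The helper below is an editor's sketch, not part of the original file. It
// shows the intended use of scaleTimeout: static constants such as
// writeMessageTimeout are stretched for Tor peers before being applied as a
// connection deadline.
func exampleWriteDeadline(p *Brontide) time.Time {
        return time.Now().Add(p.scaleTimeout(writeMessageTimeout))
}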

// CoopCloseUpdates is a struct used to communicate updates for an active close
// to the caller.
type CoopCloseUpdates struct {
        UpdateChan chan interface{}

        ErrChan chan error
}

// ChanHasRbfCoopCloser returns true if the channel identified by the channel
// point has an active RBF chan closer.
func (p *Brontide) ChanHasRbfCoopCloser(chanPoint wire.OutPoint) bool {
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
        chanCloser, found := p.activeChanCloses.Load(chanID)
        if !found {
                return false
        }

        return chanCloser.IsRight()
}

// TriggerCoopCloseRbfBump attempts a new RBF co-op close update (a fee bump)
// given a channel point and the params needed to trigger it. A channel used
// for updates, along with one used to communicate any errors, is returned.
// If the bump cannot be attempted, an error is returned.
func (p *Brontide) TriggerCoopCloseRbfBump(ctx context.Context,
        chanPoint wire.OutPoint, feeRate chainfee.SatPerKWeight,
        deliveryScript lnwire.DeliveryAddress) (*CoopCloseUpdates, error) {

        // If RBF coop close isn't permitted, then we'll return an error.
        if !p.rbfCoopCloseAllowed() {
                return nil, fmt.Errorf("rbf coop close not enabled for " +
                        "channel")
        }

        closeUpdates := &CoopCloseUpdates{
                UpdateChan: make(chan interface{}, 1),
                ErrChan:    make(chan error, 1),
        }

        // We'll re-use the existing switch struct here, even though we're
        // bypassing the switch entirely.
        closeReq := htlcswitch.ChanClose{
                CloseType:      contractcourt.CloseRegular,
                ChanPoint:      &chanPoint,
                TargetFeePerKw: feeRate,
                DeliveryScript: deliveryScript,
                Updates:        closeUpdates.UpdateChan,
                Err:            closeUpdates.ErrChan,
                Ctx:            ctx,
        }

        err := p.startRbfChanCloser(newRPCShutdownInit(&closeReq), chanPoint)
        if err != nil {
                return nil, err
        }

        return closeUpdates, nil
}
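
// The helper below is an editor's sketch, not part of the original file. It
// shows how a caller might drive an RBF fee bump and then consume the
// returned CoopCloseUpdates channels.
func exampleRbfBump(ctx context.Context, p *Brontide, chanPoint wire.OutPoint,
        feeRate chainfee.SatPerKWeight, addr lnwire.DeliveryAddress) error {

        updates, err := p.TriggerCoopCloseRbfBump(ctx, chanPoint, feeRate, addr)
        if err != nil {
                return err
        }

        // Wait for the first update, an error, or caller cancellation.
        select {
        case upd := <-updates.UpdateChan:
                p.log.Infof("Co-op close update: %v", upd)
                return nil
        case err := <-updates.ErrChan:
                return err
        case <-ctx.Done():
                return ctx.Err()
        }
}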