
lightningnetwork / lnd / build 16990665124

15 Aug 2025 01:10PM UTC coverage: 66.74% (-0.03%) from 66.765%

Pull Request #9455: [1/2] discovery+lnwire: add support for DNS host name in NodeAnnouncement msg
Merge 035fac41d into fb1adfc21

116 of 188 new or added lines in 8 files covered. (61.7%)

110 existing lines in 23 files now uncovered.

136011 of 203791 relevant lines covered (66.74%)

21482.89 hits per line
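As a quick sanity check on the arithmetic: 136011 / 203791 ≈ 0.6674, matching
the 66.74% overall figure, and 116 / 188 ≈ 0.617, matching the 61.7% reported
for new or added lines.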

Source File

/server.go: 69.33% covered
package lnd

import (
        "bytes"
        "context"
        "crypto/rand"
        "encoding/hex"
        "errors"
        "fmt"
        "math/big"
        prand "math/rand"
        "net"
        "strconv"
        "strings"
        "sync"
        "sync/atomic"
        "time"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/btcec/v2/ecdsa"
        "github.com/btcsuite/btcd/btcutil"
        "github.com/btcsuite/btcd/chaincfg"
        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/connmgr"
        "github.com/btcsuite/btcd/txscript"
        "github.com/btcsuite/btcd/wire"
        "github.com/btcsuite/btclog/v2"
        sphinx "github.com/lightningnetwork/lightning-onion"
        "github.com/lightningnetwork/lnd/aliasmgr"
        "github.com/lightningnetwork/lnd/autopilot"
        "github.com/lightningnetwork/lnd/brontide"
        "github.com/lightningnetwork/lnd/chainio"
        "github.com/lightningnetwork/lnd/chainreg"
        "github.com/lightningnetwork/lnd/chanacceptor"
        "github.com/lightningnetwork/lnd/chanbackup"
        "github.com/lightningnetwork/lnd/chanfitness"
        "github.com/lightningnetwork/lnd/channeldb"
        "github.com/lightningnetwork/lnd/channelnotifier"
        "github.com/lightningnetwork/lnd/clock"
        "github.com/lightningnetwork/lnd/cluster"
        "github.com/lightningnetwork/lnd/contractcourt"
        "github.com/lightningnetwork/lnd/discovery"
        "github.com/lightningnetwork/lnd/feature"
        "github.com/lightningnetwork/lnd/fn/v2"
        "github.com/lightningnetwork/lnd/funding"
        "github.com/lightningnetwork/lnd/graph"
        graphdb "github.com/lightningnetwork/lnd/graph/db"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/healthcheck"
        "github.com/lightningnetwork/lnd/htlcswitch"
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/invoices"
        "github.com/lightningnetwork/lnd/keychain"
        "github.com/lightningnetwork/lnd/lncfg"
        "github.com/lightningnetwork/lnd/lnencrypt"
        "github.com/lightningnetwork/lnd/lnpeer"
        "github.com/lightningnetwork/lnd/lnrpc"
        "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
        "github.com/lightningnetwork/lnd/lnutils"
        "github.com/lightningnetwork/lnd/lnwallet"
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
        "github.com/lightningnetwork/lnd/lnwallet/chanfunding"
        "github.com/lightningnetwork/lnd/lnwallet/rpcwallet"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/nat"
        "github.com/lightningnetwork/lnd/netann"
        paymentsdb "github.com/lightningnetwork/lnd/payments/db"
        "github.com/lightningnetwork/lnd/peer"
        "github.com/lightningnetwork/lnd/peernotifier"
        "github.com/lightningnetwork/lnd/pool"
        "github.com/lightningnetwork/lnd/queue"
        "github.com/lightningnetwork/lnd/routing"
        "github.com/lightningnetwork/lnd/routing/localchans"
        "github.com/lightningnetwork/lnd/routing/route"
        "github.com/lightningnetwork/lnd/subscribe"
        "github.com/lightningnetwork/lnd/sweep"
        "github.com/lightningnetwork/lnd/ticker"
        "github.com/lightningnetwork/lnd/tor"
        "github.com/lightningnetwork/lnd/walletunlocker"
        "github.com/lightningnetwork/lnd/watchtower/blob"
        "github.com/lightningnetwork/lnd/watchtower/wtclient"
        "github.com/lightningnetwork/lnd/watchtower/wtpolicy"
        "github.com/lightningnetwork/lnd/watchtower/wtserver"
)

const (
        // defaultMinPeers is the minimum number of peers nodes should always be
        // connected to.
        defaultMinPeers = 3

        // defaultStableConnDuration is a floor under which all reconnection
        // attempts will apply exponential randomized backoff. Connection
        // durations exceeding this value will be eligible to have their
        // backoffs reduced.
        defaultStableConnDuration = 10 * time.Minute

        // numInstantInitReconnect specifies how many persistent peers we should
        // always attempt outbound connections to immediately. After this value
        // is surpassed, the remaining peers will be randomly delayed using
        // maxInitReconnectDelay.
        numInstantInitReconnect = 10

        // maxInitReconnectDelay specifies the maximum delay in seconds we will
        // apply in attempting to reconnect to persistent peers on startup. The
        // value used for a particular peer will be chosen between 0s and this
        // value.
        maxInitReconnectDelay = 30

        // multiAddrConnectionStagger is the delay to wait between attempting
        // to connect to a peer with each of its advertised addresses.
        multiAddrConnectionStagger = 10 * time.Second
)
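
// Illustrative sketch (not part of server.go): the constants above shape
// lnd's reconnection policy. Assuming the prand alias imported above, the
// startup delay for a persistent peer beyond the first
// numInstantInitReconnect peers might be sampled roughly like so:
//
//      delay := time.Duration(prand.Intn(maxInitReconnectDelay)) * time.Second
//
// Peers whose previous connection lasted longer than
// defaultStableConnDuration are considered stable, and so become eligible to
// have their exponential backoff reduced rather than grown.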

var (
        // ErrPeerNotConnected signals that the server has no connection to the
        // given peer.
        ErrPeerNotConnected = errors.New("peer is not connected")

        // ErrServerNotActive indicates that the server has started but hasn't
        // fully finished the startup process.
        ErrServerNotActive = errors.New("server is still in the process of " +
                "starting")

        // ErrServerShuttingDown indicates that the server is in the process of
        // gracefully exiting.
        ErrServerShuttingDown = errors.New("server is shutting down")

        // MaxFundingAmount is a soft-limit of the maximum channel size
        // currently accepted within the Lightning Protocol. This is
        // defined in BOLT-0002, and serves as an initial precautionary limit
        // while implementations are battle tested in the real world.
        //
        // At the moment, this value depends on which chain is active. It is set
        // to the value under the Bitcoin chain as default.
        //
        // TODO(roasbeef): add command line param to modify.
        MaxFundingAmount = funding.MaxBtcFundingAmount

        // EndorsementExperimentEnd is the time after which nodes should stop
        // propagating experimental endorsement signals.
        //
        // Per blip04: January 1, 2026 12:00:00 AM UTC in unix seconds.
        EndorsementExperimentEnd = time.Unix(1767225600, 0)

        // ErrGossiperBan is one of the errors that can be returned when we
        // attempt to finalize a connection to a remote peer.
        ErrGossiperBan = errors.New("gossiper has banned remote's key")

        // ErrNoMoreRestrictedAccessSlots is one of the errors that can be
        // returned when we attempt to finalize a connection. It means that
        // this peer has no pending-open, open, or closed channels with us and
        // we are already at our connection ceiling for a peer with this access
        // status.
        ErrNoMoreRestrictedAccessSlots = errors.New("no more restricted slots")

        // ErrNoPeerScore is returned when we expect to find a score in
        // peerScores, but one does not exist.
        ErrNoPeerScore = errors.New("peer score not found")

        // ErrNoPendingPeerInfo is returned when we couldn't find any pending
        // peer info.
        ErrNoPendingPeerInfo = errors.New("no pending peer info")
)
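
// A quick check of the timestamp above: from 1970-01-01 to 2026-01-01 is
// 56*365 + 14 leap days = 20454 days, and 20454 * 86400 = 1767225600, so
// time.Unix(1767225600, 0) is indeed January 1, 2026 00:00:00 UTC.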

// errPeerAlreadyConnected is an error returned by the server when we're
// commanded to connect to a peer, but they're already connected.
type errPeerAlreadyConnected struct {
        peer *peer.Brontide
}

// Error returns the human readable version of this error type.
//
// NOTE: Part of the error interface.
func (e *errPeerAlreadyConnected) Error() string {
        return fmt.Sprintf("already connected to peer: %v", e.peer)
}

// peerAccessStatus denotes the p2p access status of a given peer. This will be
// used to assign peer ban scores that determine an action the server will
// take.
type peerAccessStatus int

const (
        // peerStatusRestricted indicates that the peer only has access to the
        // limited number of "free" reserved slots.
        peerStatusRestricted peerAccessStatus = iota

        // peerStatusTemporary indicates that the peer only has temporary p2p
        // access to the server.
        peerStatusTemporary

        // peerStatusProtected indicates that the peer has been granted
        // permanent p2p access to the server. The peer can still have its
        // access revoked.
        peerStatusProtected
)

// String returns a human-readable representation of the status code.
func (p peerAccessStatus) String() string {
        switch p {
        case peerStatusRestricted:
                return "restricted"

        case peerStatusTemporary:
                return "temporary"

        case peerStatusProtected:
                return "protected"

        default:
                return "unknown"
        }
}

// peerSlotStatus determines whether a peer gets access to one of our free
// slots or gets to bypass this safety mechanism.
type peerSlotStatus struct {
        // state determines which privileges the peer has with our server.
        state peerAccessStatus
}

// server is the main server of the Lightning Network Daemon. The server houses
// global state pertaining to the wallet, database, and the rpcserver.
// Additionally, the server is also used as a central messaging bus to interact
// with any of its companion objects.
type server struct {
        active   int32 // atomic
        stopping int32 // atomic

        start sync.Once
        stop  sync.Once

        cfg *Config

        implCfg *ImplementationCfg

        // identityECDH is an ECDH capable wrapper for the private key used
        // to authenticate any incoming connections.
        identityECDH keychain.SingleKeyECDH

        // identityKeyLoc is the key locator for the above wrapped identity key.
        identityKeyLoc keychain.KeyLocator

        // nodeSigner is an implementation of the MessageSigner interface
        // that's backed by the identity private key of the running lnd node.
        nodeSigner *netann.NodeSigner

        chanStatusMgr *netann.ChanStatusManager

        // listenAddrs is the list of addresses the server is currently
        // listening on.
        listenAddrs []net.Addr

        // torController is a client that will communicate with a locally
        // running Tor server. This client will handle initiating and
        // authenticating the connection to the Tor server, automatically
        // creating and setting up onion services, etc.
        torController *tor.Controller

        // natTraversal is the specific NAT traversal technique used to
        // automatically set up port forwarding rules in order to advertise to
        // the network that the node is accepting inbound connections.
        natTraversal nat.Traversal

        // lastDetectedIP is the last IP detected by the NAT traversal technique
        // above. This IP will be watched periodically in a goroutine in order
        // to handle dynamic IP changes.
        lastDetectedIP net.IP

        mu sync.RWMutex

        // peersByPub is a map of the active peers.
        //
        // NOTE: The key used here is the raw bytes of the peer's public key to
        // string conversion, which means it cannot be printed using `%s` as it
        // will just print the binary.
        //
        // TODO(yy): Use the hex string instead.
        peersByPub map[string]*peer.Brontide

        inboundPeers  map[string]*peer.Brontide
        outboundPeers map[string]*peer.Brontide

        peerConnectedListeners    map[string][]chan<- lnpeer.Peer
        peerDisconnectedListeners map[string][]chan<- struct{}

        // TODO(yy): the Brontide.Start doesn't know this value, which means it
        // will continue to send messages even if there are no active channels
        // and the value below is false. Once it's pruned, all its connections
        // will be closed, thus the Brontide.Start will return an error.
        persistentPeers        map[string]bool
        persistentPeersBackoff map[string]time.Duration
        persistentPeerAddrs    map[string][]*lnwire.NetAddress
        persistentConnReqs     map[string][]*connmgr.ConnReq
        persistentRetryCancels map[string]chan struct{}

        // peerErrors keeps a set of peer error buffers for peers that have
        // disconnected from us. This allows us to track historic peer errors
        // over connections. The string of the peer's compressed pubkey is used
        // as a key for this map.
        peerErrors map[string]*queue.CircularBuffer

        // ignorePeerTermination tracks peers for which the server has initiated
        // a disconnect. Adding a peer to this map causes the peer termination
        // watcher to short circuit in the event that peers are purposefully
        // disconnected.
        ignorePeerTermination map[*peer.Brontide]struct{}

        // scheduledPeerConnection maps a pubkey string to a callback that
        // should be executed in the peerTerminationWatcher when the prior peer
        // with the same pubkey exits. This allows the server to wait until the
        // prior peer has cleaned up successfully, before adding the new peer
        // intended to replace it.
        scheduledPeerConnection map[string]func()

        // pongBuf is a shared pong reply buffer we'll use across all active
        // peer goroutines. We know the max size of a pong message
        // (lnwire.MaxPongBytes), so we can allocate this ahead of time, and
        // avoid allocations each time we need to send a pong message.
        pongBuf []byte

        cc *chainreg.ChainControl

        fundingMgr *funding.Manager

        graphDB *graphdb.ChannelGraph

        chanStateDB *channeldb.ChannelStateDB

        addrSource channeldb.AddrSource

        // miscDB is the DB that contains all "other" databases within the main
        // channel DB that haven't been separated out yet.
        miscDB *channeldb.DB

        invoicesDB invoices.InvoiceDB

        // kvPaymentsDB is the DB that contains all functions for managing
        // payments.
        //
        // TODO(ziggie): Replace with interface.
        kvPaymentsDB *paymentsdb.KVPaymentsDB

        aliasMgr *aliasmgr.Manager

        htlcSwitch *htlcswitch.Switch

        interceptableSwitch *htlcswitch.InterceptableSwitch

        invoices *invoices.InvoiceRegistry

        invoiceHtlcModifier *invoices.HtlcModificationInterceptor

        channelNotifier *channelnotifier.ChannelNotifier

        peerNotifier *peernotifier.PeerNotifier

        htlcNotifier *htlcswitch.HtlcNotifier

        witnessBeacon contractcourt.WitnessBeacon

        breachArbitrator *contractcourt.BreachArbitrator

        missionController *routing.MissionController
        defaultMC         *routing.MissionControl

        graphBuilder *graph.Builder

        chanRouter *routing.ChannelRouter

        controlTower routing.ControlTower

        authGossiper *discovery.AuthenticatedGossiper

        localChanMgr *localchans.Manager

        utxoNursery *contractcourt.UtxoNursery

        sweeper *sweep.UtxoSweeper

        chainArb *contractcourt.ChainArbitrator

        sphinx *hop.OnionProcessor

        towerClientMgr *wtclient.Manager

        connMgr *connmgr.ConnManager

        sigPool *lnwallet.SigPool

        writePool *pool.Write

        readPool *pool.Read

        tlsManager *TLSManager

        // featureMgr dispatches feature vectors for various contexts within the
        // daemon.
        featureMgr *feature.Manager

        // currentNodeAnn is the node announcement that has been broadcast to
        // the network upon startup, if the attributes of the node (us) have
        // changed since last start.
        currentNodeAnn *lnwire.NodeAnnouncement

        // chansToRestore is the set of channels that upon starting, the server
        // should attempt to restore/recover.
        chansToRestore walletunlocker.ChannelsToRecover

        // chanSubSwapper is a sub-system that will ensure our on-disk channel
        // backups are consistent at all times. It interacts with the
        // channelNotifier to be notified of newly opened and closed channels.
        chanSubSwapper *chanbackup.SubSwapper

        // chanEventStore tracks the behaviour of channels and their remote peers to
        // provide insights into their health and performance.
        chanEventStore *chanfitness.ChannelEventStore

        hostAnn *netann.HostAnnouncer

        // livenessMonitor monitors that lnd has access to critical resources.
        livenessMonitor *healthcheck.Monitor

        customMessageServer *subscribe.Server

        // txPublisher is a publisher with fee-bumping capability.
        txPublisher *sweep.TxPublisher

        // blockbeatDispatcher is a block dispatcher that notifies subscribers
        // of new blocks.
        blockbeatDispatcher *chainio.BlockbeatDispatcher

        // peerAccessMan implements peer access controls.
        peerAccessMan *accessMan

        quit chan struct{}

        wg sync.WaitGroup
}

// updatePersistentPeerAddrs subscribes to topology changes and stores
// advertised addresses for any NodeAnnouncements from our persisted peers.
func (s *server) updatePersistentPeerAddrs() error {
        graphSub, err := s.graphDB.SubscribeTopology()
        if err != nil {
                return err
        }

        s.wg.Add(1)
        go func() {
                defer func() {
                        graphSub.Cancel()
                        s.wg.Done()
                }()

                for {
                        select {
                        case <-s.quit:
                                return

                        case topChange, ok := <-graphSub.TopologyChanges:
                                // If the router is shutting down, then we will
                                // as well.
                                if !ok {
                                        return
                                }

                                for _, update := range topChange.NodeUpdates {
                                        pubKeyStr := string(
                                                update.IdentityKey.
                                                        SerializeCompressed(),
                                        )

                                        // We only care about updates from
                                        // our persistentPeers.
                                        s.mu.RLock()
                                        _, ok := s.persistentPeers[pubKeyStr]
                                        s.mu.RUnlock()
                                        if !ok {
                                                continue
                                        }

                                        addrs := make([]*lnwire.NetAddress, 0,
                                                len(update.Addresses))

                                        for _, addr := range update.Addresses {
                                                addrs = append(addrs,
                                                        &lnwire.NetAddress{
                                                                IdentityKey: update.IdentityKey,
                                                                Address:     addr,
                                                                ChainNet:    s.cfg.ActiveNetParams.Net,
                                                        },
                                                )
                                        }

                                        s.mu.Lock()

                                        // Update the stored addresses for this
                                        // peer to reflect the new set.
                                        s.persistentPeerAddrs[pubKeyStr] = addrs

                                        // If there are no outstanding
                                        // connection requests for this peer
                                        // then our work is done since we are
                                        // not currently trying to connect to
                                        // them.
                                        if len(s.persistentConnReqs[pubKeyStr]) == 0 {
                                                s.mu.Unlock()
                                                continue
                                        }

                                        s.mu.Unlock()

                                        s.connectToPersistentPeer(pubKeyStr)
                                }
                        }
                }
        }()

        return nil
}

// CustomMessage is a custom message that is received from a peer.
type CustomMessage struct {
        // Peer is the peer pubkey.
        Peer [33]byte

        // Msg is the custom wire message.
        Msg *lnwire.Custom
}
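
// Illustrative sketch (not part of server.go): a CustomMessage like the one
// above could be assembled from a raw payload using lnwire.NewCustom, with
// `pub` and `payload` as hypothetical inputs:
//
//      msg, err := lnwire.NewCustom(lnwire.CustomTypeStart, payload)
//      if err != nil {
//              return err
//      }
//      custom := CustomMessage{Peer: pub, Msg: msg}
//
// Message types below lnwire.CustomTypeStart (32768) are reserved for the
// protocol unless explicitly overridden in lnd's configuration.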

// parseAddr parses an address from its string format to a net.Addr.
func parseAddr(address string, netCfg tor.Net) (net.Addr, error) {
        var (
                host string
                port int
        )

        // Split the address into its host and port components.
        h, p, err := net.SplitHostPort(address)
        if err != nil {
                // If a port wasn't specified, we'll assume the address only
                // contains the host so we'll use the default port.
                host = address
                port = defaultPeerPort
        } else {
                // Otherwise, we'll note both the host and port.
                host = h
                portNum, err := strconv.Atoi(p)
                if err != nil {
                        return nil, err
                }
                port = portNum
        }

        if tor.IsOnionHost(host) {
                return &tor.OnionAddr{OnionService: host, Port: port}, nil
        }

        // If the host is part of a TCP address, we'll use the network
        // specific ResolveTCPAddr function to resolve these addresses over
        // Tor and prevent leaking your real IP address.
        hostPort := net.JoinHostPort(host, strconv.Itoa(port))
        return netCfg.ResolveTCPAddr("tcp", hostPort)
}
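
// Illustrative sketch (not part of server.go): parseAddr falls back to
// defaultPeerPort (9735 in lnd's default configuration) when the string has
// no port, and detects onion services by their host suffix; the literals
// here are hypothetical:
//
//      tcpAddr, err := parseAddr("203.0.113.7", cfg.net)           // port 9735
//      onionAddr, err := parseAddr("somehost.onion:9736", cfg.net)
//
// The second call yields a *tor.OnionAddr rather than a resolved TCP
// address, so no DNS or IP resolution is attempted for onion hosts.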

// noiseDial is a factory function which creates a connmgr compliant dialing
// function by returning a closure which includes the server's identity key.
func noiseDial(idKey keychain.SingleKeyECDH,
        netCfg tor.Net, timeout time.Duration) func(net.Addr) (net.Conn, error) {

        return func(a net.Addr) (net.Conn, error) {
                lnAddr := a.(*lnwire.NetAddress)
                return brontide.Dial(idKey, lnAddr, timeout, netCfg.Dial)
        }
}
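
// Illustrative sketch (not part of server.go): "connmgr compliant" means the
// returned closure matches connmgr.Config.Dial, so it can be handed to
// btcd's connection manager; the config shown is pared down:
//
//      cmgr, err := connmgr.New(&connmgr.Config{
//              Dial: noiseDial(
//                      s.identityECDH, s.cfg.net, s.cfg.ConnectionTimeout,
//              ),
//      })
//
// Every outbound dial made by connmgr then runs the brontide (Noise-based)
// handshake with our identity key.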

// newServer creates a new instance of the server which is to listen using the
// passed listener address.
//
//nolint:funlen
func newServer(ctx context.Context, cfg *Config, listenAddrs []net.Addr,
        dbs *DatabaseInstances, cc *chainreg.ChainControl,
        nodeKeyDesc *keychain.KeyDescriptor,
        chansToRestore walletunlocker.ChannelsToRecover,
        chanPredicate chanacceptor.ChannelAcceptor,
        torController *tor.Controller, tlsManager *TLSManager,
        leaderElector cluster.LeaderElector,
        implCfg *ImplementationCfg) (*server, error) {

        var (
                err         error
                nodeKeyECDH = keychain.NewPubKeyECDH(*nodeKeyDesc, cc.KeyRing)

                // We just derived the full descriptor, so we know the public
                // key is set on it.
                nodeKeySigner = keychain.NewPubKeyMessageSigner(
                        nodeKeyDesc.PubKey, nodeKeyDesc.KeyLocator, cc.KeyRing,
                )
        )

        var serializedPubKey [33]byte
        copy(serializedPubKey[:], nodeKeyDesc.PubKey.SerializeCompressed())

        netParams := cfg.ActiveNetParams.Params

        // Initialize the sphinx router.
        replayLog := htlcswitch.NewDecayedLog(
                dbs.DecayedLogDB, cc.ChainNotifier,
        )
        sphinxRouter := sphinx.NewRouter(nodeKeyECDH, replayLog)

        writeBufferPool := pool.NewWriteBuffer(
                pool.DefaultWriteBufferGCInterval,
                pool.DefaultWriteBufferExpiryInterval,
        )

        writePool := pool.NewWrite(
                writeBufferPool, cfg.Workers.Write, pool.DefaultWorkerTimeout,
        )

        readBufferPool := pool.NewReadBuffer(
                pool.DefaultReadBufferGCInterval,
                pool.DefaultReadBufferExpiryInterval,
        )

        readPool := pool.NewRead(
                readBufferPool, cfg.Workers.Read, pool.DefaultWorkerTimeout,
        )

        // If the taproot overlay flag is set, but we don't have an aux funding
        // controller, then we'll exit as this is incompatible.
        if cfg.ProtocolOptions.TaprootOverlayChans &&
                implCfg.AuxFundingController.IsNone() {

                return nil, fmt.Errorf("taproot overlay flag set, but " +
                        "overlay channels are not supported " +
                        "in a standalone lnd build")
        }

        //nolint:ll
        featureMgr, err := feature.NewManager(feature.Config{
                NoTLVOnion:                cfg.ProtocolOptions.LegacyOnion(),
                NoStaticRemoteKey:         cfg.ProtocolOptions.NoStaticRemoteKey(),
                NoAnchors:                 cfg.ProtocolOptions.NoAnchorCommitments(),
                NoWumbo:                   !cfg.ProtocolOptions.Wumbo(),
                NoScriptEnforcementLease:  cfg.ProtocolOptions.NoScriptEnforcementLease(),
                NoKeysend:                 !cfg.AcceptKeySend,
                NoOptionScidAlias:         !cfg.ProtocolOptions.ScidAlias(),
                NoZeroConf:                !cfg.ProtocolOptions.ZeroConf(),
                NoAnySegwit:               cfg.ProtocolOptions.NoAnySegwit(),
                CustomFeatures:            cfg.ProtocolOptions.CustomFeatures(),
                NoTaprootChans:            !cfg.ProtocolOptions.TaprootChans,
                NoTaprootOverlay:          !cfg.ProtocolOptions.TaprootOverlayChans,
                NoRouteBlinding:           cfg.ProtocolOptions.NoRouteBlinding(),
                NoExperimentalEndorsement: cfg.ProtocolOptions.NoExperimentalEndorsement(),
                NoQuiescence:              cfg.ProtocolOptions.NoQuiescence(),
                NoRbfCoopClose:            !cfg.ProtocolOptions.RbfCoopClose,
        })
        if err != nil {
                return nil, err
        }

        invoiceHtlcModifier := invoices.NewHtlcModificationInterceptor()
        registryConfig := invoices.RegistryConfig{
                FinalCltvRejectDelta:        lncfg.DefaultFinalCltvRejectDelta,
                HtlcHoldDuration:            invoices.DefaultHtlcHoldDuration,
                Clock:                       clock.NewDefaultClock(),
                AcceptKeySend:               cfg.AcceptKeySend,
                AcceptAMP:                   cfg.AcceptAMP,
                GcCanceledInvoicesOnStartup: cfg.GcCanceledInvoicesOnStartup,
                GcCanceledInvoicesOnTheFly:  cfg.GcCanceledInvoicesOnTheFly,
                KeysendHoldTime:             cfg.KeysendHoldTime,
                HtlcInterceptor:             invoiceHtlcModifier,
        }

        addrSource := channeldb.NewMultiAddrSource(dbs.ChanStateDB, dbs.GraphDB)

        s := &server{
                cfg:            cfg,
                implCfg:        implCfg,
                graphDB:        dbs.GraphDB,
                chanStateDB:    dbs.ChanStateDB.ChannelStateDB(),
                addrSource:     addrSource,
                miscDB:         dbs.ChanStateDB,
                invoicesDB:     dbs.InvoiceDB,
                kvPaymentsDB:   dbs.KVPaymentsDB,
                cc:             cc,
                sigPool:        lnwallet.NewSigPool(cfg.Workers.Sig, cc.Signer),
                writePool:      writePool,
                readPool:       readPool,
                chansToRestore: chansToRestore,

                blockbeatDispatcher: chainio.NewBlockbeatDispatcher(
                        cc.ChainNotifier,
                ),
                channelNotifier: channelnotifier.New(
                        dbs.ChanStateDB.ChannelStateDB(),
                ),

                identityECDH:   nodeKeyECDH,
                identityKeyLoc: nodeKeyDesc.KeyLocator,
                nodeSigner:     netann.NewNodeSigner(nodeKeySigner),

                listenAddrs: listenAddrs,

                // TODO(roasbeef): derive proper onion key based on rotation
                // schedule
                sphinx: hop.NewOnionProcessor(sphinxRouter),

                torController: torController,

                persistentPeers:         make(map[string]bool),
                persistentPeersBackoff:  make(map[string]time.Duration),
                persistentConnReqs:      make(map[string][]*connmgr.ConnReq),
                persistentPeerAddrs:     make(map[string][]*lnwire.NetAddress),
                persistentRetryCancels:  make(map[string]chan struct{}),
                peerErrors:              make(map[string]*queue.CircularBuffer),
                ignorePeerTermination:   make(map[*peer.Brontide]struct{}),
                scheduledPeerConnection: make(map[string]func()),
                pongBuf:                 make([]byte, lnwire.MaxPongBytes),

                peersByPub:                make(map[string]*peer.Brontide),
                inboundPeers:              make(map[string]*peer.Brontide),
                outboundPeers:             make(map[string]*peer.Brontide),
                peerConnectedListeners:    make(map[string][]chan<- lnpeer.Peer),
                peerDisconnectedListeners: make(map[string][]chan<- struct{}),

                invoiceHtlcModifier: invoiceHtlcModifier,

                customMessageServer: subscribe.NewServer(),

                tlsManager: tlsManager,

                featureMgr: featureMgr,
                quit:       make(chan struct{}),
        }

        // Start the low-level services once they are initialized.
        //
        // TODO(yy): break the server startup into four steps,
        // 1. init the low-level services.
        // 2. start the low-level services.
        // 3. init the high-level services.
        // 4. start the high-level services.
        if err := s.startLowLevelServices(); err != nil {
                return nil, err
        }

        currentHash, currentHeight, err := s.cc.ChainIO.GetBestBlock()
        if err != nil {
                return nil, err
        }

        expiryWatcher := invoices.NewInvoiceExpiryWatcher(
                clock.NewDefaultClock(), cfg.Invoices.HoldExpiryDelta,
                uint32(currentHeight), currentHash, cc.ChainNotifier,
        )
        s.invoices = invoices.NewRegistry(
                dbs.InvoiceDB, expiryWatcher, &registryConfig,
        )

        s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now)

        thresholdSats := btcutil.Amount(cfg.MaxFeeExposure)
        thresholdMSats := lnwire.NewMSatFromSatoshis(thresholdSats)

        linkUpdater := func(shortID lnwire.ShortChannelID) error {
                link, err := s.htlcSwitch.GetLinkByShortID(shortID)
                if err != nil {
                        return err
                }

                s.htlcSwitch.UpdateLinkAliases(link)

                return nil
        }

        s.aliasMgr, err = aliasmgr.NewManager(dbs.ChanStateDB, linkUpdater)
        if err != nil {
                return nil, err
        }

        s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{
                DB:                   dbs.ChanStateDB,
                FetchAllOpenChannels: s.chanStateDB.FetchAllOpenChannels,
                FetchAllChannels:     s.chanStateDB.FetchAllChannels,
                FetchClosedChannels:  s.chanStateDB.FetchClosedChannels,
                LocalChannelClose: func(pubKey []byte,
                        request *htlcswitch.ChanClose) {

                        peer, err := s.FindPeerByPubStr(string(pubKey))
                        if err != nil {
                                srvrLog.Errorf("unable to close channel, peer"+
                                        " with %v id can't be found: %v",
                                        pubKey, err,
                                )
                                return
                        }

                        peer.HandleLocalCloseChanReqs(request)
                },
                FwdingLog:              dbs.ChanStateDB.ForwardingLog(),
                SwitchPackager:         channeldb.NewSwitchPackager(),
                ExtractErrorEncrypter:  s.sphinx.ExtractErrorEncrypter,
                FetchLastChannelUpdate: s.fetchLastChanUpdate(),
                Notifier:               s.cc.ChainNotifier,
                HtlcNotifier:           s.htlcNotifier,
                FwdEventTicker:         ticker.New(htlcswitch.DefaultFwdEventInterval),
                LogEventTicker:         ticker.New(htlcswitch.DefaultLogInterval),
                AckEventTicker:         ticker.New(htlcswitch.DefaultAckInterval),
                AllowCircularRoute:     cfg.AllowCircularRoute,
                RejectHTLC:             cfg.RejectHTLC,
                Clock:                  clock.NewDefaultClock(),
                MailboxDeliveryTimeout: cfg.Htlcswitch.MailboxDeliveryTimeout,
                MaxFeeExposure:         thresholdMSats,
                SignAliasUpdate:        s.signAliasUpdate,
                IsAlias:                aliasmgr.IsAlias,
        }, uint32(currentHeight))
        if err != nil {
                return nil, err
        }
        s.interceptableSwitch, err = htlcswitch.NewInterceptableSwitch(
                &htlcswitch.InterceptableSwitchConfig{
                        Switch:             s.htlcSwitch,
                        CltvRejectDelta:    lncfg.DefaultFinalCltvRejectDelta,
                        CltvInterceptDelta: lncfg.DefaultCltvInterceptDelta,
                        RequireInterceptor: s.cfg.RequireInterceptor,
                        Notifier:           s.cc.ChainNotifier,
                },
        )
        if err != nil {
                return nil, err
        }

        s.witnessBeacon = newPreimageBeacon(
                dbs.ChanStateDB.NewWitnessCache(),
                s.interceptableSwitch.ForwardPacket,
        )

        chanStatusMgrCfg := &netann.ChanStatusConfig{
                ChanStatusSampleInterval: cfg.ChanStatusSampleInterval,
                ChanEnableTimeout:        cfg.ChanEnableTimeout,
                ChanDisableTimeout:       cfg.ChanDisableTimeout,
                OurPubKey:                nodeKeyDesc.PubKey,
                OurKeyLoc:                nodeKeyDesc.KeyLocator,
                MessageSigner:            s.nodeSigner,
                IsChannelActive:          s.htlcSwitch.HasActiveLink,
                ApplyChannelUpdate:       s.applyChannelUpdate,
                DB:                       s.chanStateDB,
                Graph:                    dbs.GraphDB,
        }

        chanStatusMgr, err := netann.NewChanStatusManager(chanStatusMgrCfg)
        if err != nil {
                return nil, err
        }
        s.chanStatusMgr = chanStatusMgr

        // If enabled, use either UPnP or NAT-PMP to automatically configure
        // port forwarding for users behind a NAT.
        if cfg.NAT {
                srvrLog.Info("Scanning local network for a UPnP enabled device")

                discoveryTimeout := time.Duration(10 * time.Second)

                ctx, cancel := context.WithTimeout(
                        context.Background(), discoveryTimeout,
                )
                defer cancel()
                upnp, err := nat.DiscoverUPnP(ctx)
                if err == nil {
                        s.natTraversal = upnp
                } else {
                        // If we were not able to discover a UPnP enabled device
                        // on the local network, we'll fall back to attempting
                        // to discover a NAT-PMP enabled device.
                        srvrLog.Errorf("Unable to discover a UPnP enabled "+
                                "device on the local network: %v", err)

                        srvrLog.Info("Scanning local network for a NAT-PMP " +
                                "enabled device")

                        pmp, err := nat.DiscoverPMP(discoveryTimeout)
                        if err != nil {
                                err := fmt.Errorf("unable to discover a "+
                                        "NAT-PMP enabled device on the local "+
                                        "network: %v", err)
                                srvrLog.Error(err)
                                return nil, err
                        }

                        s.natTraversal = pmp
                }
        }

        // If we were requested to automatically configure port forwarding,
        // we'll use the ports that the server will be listening on.
        externalIPStrings := make([]string, len(cfg.ExternalIPs))
        for idx, ip := range cfg.ExternalIPs {
                externalIPStrings[idx] = ip.String()
        }
        if s.natTraversal != nil {
                listenPorts := make([]uint16, 0, len(listenAddrs))
                for _, listenAddr := range listenAddrs {
                        // At this point, the listen addresses should have
                        // already been normalized, so it's safe to ignore the
                        // errors.
                        _, portStr, _ := net.SplitHostPort(listenAddr.String())
                        port, _ := strconv.Atoi(portStr)

                        listenPorts = append(listenPorts, uint16(port))
                }

                ips, err := s.configurePortForwarding(listenPorts...)
                if err != nil {
                        srvrLog.Errorf("Unable to automatically set up port "+
                                "forwarding using %s: %v",
                                s.natTraversal.Name(), err)
                } else {
                        srvrLog.Infof("Automatically set up port forwarding "+
                                "using %s to advertise external IP",
                                s.natTraversal.Name())
                        externalIPStrings = append(externalIPStrings, ips...)
                }
        }

        // If external IP addresses have been specified, add those to the list
        // of this server's addresses.
        externalIPs, err := lncfg.NormalizeAddresses(
                externalIPStrings, strconv.Itoa(defaultPeerPort),
                cfg.net.ResolveTCPAddr,
        )
        if err != nil {
                return nil, err
        }

        selfAddrs := make([]net.Addr, 0, len(externalIPs))
        selfAddrs = append(selfAddrs, externalIPs...)

        // We'll now reconstruct a node announcement based on our current
        // configuration so we can send it out as a sort of heartbeat within
        // the network.
        //
        // We'll start by parsing the node color from configuration.
        color, err := lncfg.ParseHexColor(cfg.Color)
        if err != nil {
                srvrLog.Errorf("unable to parse color: %v\n", err)
                return nil, err
        }

        // If no alias is provided, default to the first 10 bytes of the
        // public key, hex encoded.
        alias := cfg.Alias
        if alias == "" {
                alias = hex.EncodeToString(serializedPubKey[:10])
        }
        nodeAlias, err := lnwire.NewNodeAlias(alias)
        if err != nil {
                return nil, err
        }

        // TODO(elle): All previously persisted node announcement fields (ie,
        //  not just LastUpdate) should be consulted here to ensure that we
        //  aren't overwriting any fields that may have been set during the
        //  last run of lnd.
        nodeLastUpdate := time.Now()
        srcNode, err := dbs.GraphDB.SourceNode(ctx)
        switch {
        // If we have a source node persisted in the DB already, then we just
        // need to make sure that the new LastUpdate time is at least one
        // second after the last update time.
        case err == nil:
                if srcNode.LastUpdate.Second() >= nodeLastUpdate.Second() {
                        nodeLastUpdate = srcNode.LastUpdate.Add(time.Second)
                }

        // If we don't have a source node persisted in the DB, then we'll
        // create a new one with the current time as the LastUpdate.
        case errors.Is(err, graphdb.ErrSourceNodeNotSet):

        // If the above cases are not matched, then we have an unhandled non
        // nil error.
        default:
                return nil, fmt.Errorf("unable to fetch source node: %w", err)
        }

        selfNode := &models.LightningNode{
                HaveNodeAnnouncement: true,
                LastUpdate:           nodeLastUpdate,
                Addresses:            selfAddrs,
                Alias:                nodeAlias.String(),
                Features:             s.featureMgr.Get(feature.SetNodeAnn),
                Color:                color,
        }
        copy(selfNode.PubKeyBytes[:], nodeKeyDesc.PubKey.SerializeCompressed())

        // Based on the disk representation of the node announcement generated
        // above, we'll generate a node announcement that can go out on the
        // network so we can properly sign it.
        nodeAnn, err := selfNode.NodeAnnouncement(false)
        if err != nil {
                return nil, fmt.Errorf("unable to gen self node ann: %w", err)
        }

        // With the announcement generated, we'll sign it to properly
        // authenticate the message on the network.
        authSig, err := netann.SignAnnouncement(
                s.nodeSigner, nodeKeyDesc.KeyLocator, nodeAnn,
        )
        if err != nil {
                return nil, fmt.Errorf("unable to generate signature for "+
                        "self node announcement: %v", err)
        }
        selfNode.AuthSigBytes = authSig.Serialize()
        nodeAnn.Signature, err = lnwire.NewSigFromECDSARawSignature(
                selfNode.AuthSigBytes,
        )
        if err != nil {
                return nil, err
        }

        // Finally, we'll update the representation on disk, and update our
        // cached in-memory version as well.
        if err := dbs.GraphDB.SetSourceNode(ctx, selfNode); err != nil {
                return nil, fmt.Errorf("can't set self node: %w", err)
        }
        s.currentNodeAnn = nodeAnn

        // The router will get access to the payment ID sequencer, such that it
        // can generate unique payment IDs.
        sequencer, err := htlcswitch.NewPersistentSequencer(dbs.ChanStateDB)
        if err != nil {
                return nil, err
        }

        // Instantiate mission control with config from the sub server.
        //
        // TODO(joostjager): When we are further in the process of moving to sub
        // servers, the mission control instance itself can be moved there too.
        routingConfig := routerrpc.GetRoutingConfig(cfg.SubRPCServers.RouterRPC)

        // We only initialize a probability estimator if there's no custom one.
        var estimator routing.Estimator
        if cfg.Estimator != nil {
                estimator = cfg.Estimator
        } else {
                switch routingConfig.ProbabilityEstimatorType {
                case routing.AprioriEstimatorName:
                        aCfg := routingConfig.AprioriConfig
                        aprioriConfig := routing.AprioriConfig{
                                AprioriHopProbability: aCfg.HopProbability,
                                PenaltyHalfLife:       aCfg.PenaltyHalfLife,
                                AprioriWeight:         aCfg.Weight,
                                CapacityFraction:      aCfg.CapacityFraction,
                        }

                        estimator, err = routing.NewAprioriEstimator(
                                aprioriConfig,
                        )
                        if err != nil {
                                return nil, err
                        }

                case routing.BimodalEstimatorName:
                        bCfg := routingConfig.BimodalConfig
                        bimodalConfig := routing.BimodalConfig{
                                BimodalNodeWeight: bCfg.NodeWeight,
                                BimodalScaleMsat: lnwire.MilliSatoshi(
                                        bCfg.Scale,
                                ),
                                BimodalDecayTime: bCfg.DecayTime,
                        }

                        estimator, err = routing.NewBimodalEstimator(
                                bimodalConfig,
                        )
                        if err != nil {
                                return nil, err
                        }

                default:
                        return nil, fmt.Errorf("unknown estimator type %v",
                                routingConfig.ProbabilityEstimatorType)
                }
        }

        mcCfg := &routing.MissionControlConfig{
                OnConfigUpdate:          fn.Some(s.UpdateRoutingConfig),
                Estimator:               estimator,
                MaxMcHistory:            routingConfig.MaxMcHistory,
                McFlushInterval:         routingConfig.McFlushInterval,
                MinFailureRelaxInterval: routing.DefaultMinFailureRelaxInterval,
        }

        s.missionController, err = routing.NewMissionController(
                dbs.ChanStateDB, selfNode.PubKeyBytes, mcCfg,
        )
        if err != nil {
                return nil, fmt.Errorf("can't create mission control "+
                        "manager: %w", err)
        }
        s.defaultMC, err = s.missionController.GetNamespacedStore(
                routing.DefaultMissionControlNamespace,
        )
        if err != nil {
                return nil, fmt.Errorf("can't create mission control in the "+
                        "default namespace: %w", err)
        }

        srvrLog.Debugf("Instantiating payment session source with config: "+
                "AttemptCost=%v + %v%%, MinRouteProbability=%v",
                int64(routingConfig.AttemptCost),
                float64(routingConfig.AttemptCostPPM)/10000,
                routingConfig.MinRouteProbability)

        pathFindingConfig := routing.PathFindingConfig{
                AttemptCost: lnwire.NewMSatFromSatoshis(
                        routingConfig.AttemptCost,
                ),
                AttemptCostPPM: routingConfig.AttemptCostPPM,
                MinProbability: routingConfig.MinRouteProbability,
        }

        sourceNode, err := dbs.GraphDB.SourceNode(ctx)
        if err != nil {
                return nil, fmt.Errorf("error getting source node: %w", err)
        }
        paymentSessionSource := &routing.SessionSource{
                GraphSessionFactory: dbs.GraphDB,
                SourceNode:          sourceNode,
                MissionControl:      s.defaultMC,
                GetLink:             s.htlcSwitch.GetLinkByShortID,
                PathFindingConfig:   pathFindingConfig,
        }

        s.controlTower = routing.NewControlTower(dbs.KVPaymentsDB)

        strictPruning := cfg.Bitcoin.Node == "neutrino" ||
                cfg.Routing.StrictZombiePruning

        s.graphBuilder, err = graph.NewBuilder(&graph.Config{
                SelfNode:            selfNode.PubKeyBytes,
                Graph:               dbs.GraphDB,
                Chain:               cc.ChainIO,
                ChainView:           cc.ChainView,
                Notifier:            cc.ChainNotifier,
                ChannelPruneExpiry:  graph.DefaultChannelPruneExpiry,
                GraphPruneInterval:  time.Hour,
                FirstTimePruneDelay: graph.DefaultFirstTimePruneDelay,
                AssumeChannelValid:  cfg.Routing.AssumeChannelValid,
                StrictZombiePruning: strictPruning,
                IsAlias:             aliasmgr.IsAlias,
        })
        if err != nil {
                return nil, fmt.Errorf("can't create graph builder: %w", err)
        }

1161
        s.chanRouter, err = routing.New(routing.Config{
3✔
1162
                SelfNode:           selfNode.PubKeyBytes,
3✔
1163
                RoutingGraph:       dbs.GraphDB,
3✔
1164
                Chain:              cc.ChainIO,
3✔
1165
                Payer:              s.htlcSwitch,
3✔
1166
                Control:            s.controlTower,
3✔
1167
                MissionControl:     s.defaultMC,
3✔
1168
                SessionSource:      paymentSessionSource,
3✔
1169
                GetLink:            s.htlcSwitch.GetLinkByShortID,
3✔
1170
                NextPaymentID:      sequencer.NextID,
3✔
1171
                PathFindingConfig:  pathFindingConfig,
3✔
1172
                Clock:              clock.NewDefaultClock(),
3✔
1173
                ApplyChannelUpdate: s.graphBuilder.ApplyChannelUpdate,
3✔
1174
                ClosedSCIDs:        s.fetchClosedChannelSCIDs(),
3✔
1175
                TrafficShaper:      implCfg.TrafficShaper,
3✔
1176
        })
3✔
1177
        if err != nil {
3✔
1178
                return nil, fmt.Errorf("can't create router: %w", err)
×
1179
        }
×
1180

1181
        chanSeries := discovery.NewChanSeries(s.graphDB)
3✔
1182
        gossipMessageStore, err := discovery.NewMessageStore(dbs.ChanStateDB)
3✔
1183
        if err != nil {
3✔
1184
                return nil, err
×
1185
        }
×
1186
        waitingProofStore, err := channeldb.NewWaitingProofStore(dbs.ChanStateDB)
3✔
1187
        if err != nil {
3✔
1188
                return nil, err
×
1189
        }
×
1190

1191
        scidCloserMan := discovery.NewScidCloserMan(s.graphDB, s.chanStateDB)
3✔
1192

3✔
1193
        s.authGossiper = discovery.New(discovery.Config{
3✔
1194
                Graph:                 s.graphBuilder,
3✔
1195
                ChainIO:               s.cc.ChainIO,
3✔
1196
                Notifier:              s.cc.ChainNotifier,
3✔
1197
                ChainHash:             *s.cfg.ActiveNetParams.GenesisHash,
3✔
1198
                Broadcast:             s.BroadcastMessage,
3✔
1199
                ChanSeries:            chanSeries,
3✔
1200
                NotifyWhenOnline:      s.NotifyWhenOnline,
3✔
1201
                NotifyWhenOffline:     s.NotifyWhenOffline,
3✔
1202
                FetchSelfAnnouncement: s.getNodeAnnouncement,
3✔
1203
                UpdateSelfAnnouncement: func() (lnwire.NodeAnnouncement,
3✔
1204
                        error) {
3✔
1205

×
1206
                        return s.genNodeAnnouncement(nil)
×
1207
                },
×
1208
                ProofMatureDelta:        cfg.Gossip.AnnouncementConf,
1209
                TrickleDelay:            time.Millisecond * time.Duration(cfg.TrickleDelay),
1210
                RetransmitTicker:        ticker.New(time.Minute * 30),
1211
                RebroadcastInterval:     time.Hour * 24,
1212
                WaitingProofStore:       waitingProofStore,
1213
                MessageStore:            gossipMessageStore,
1214
                AnnSigner:               s.nodeSigner,
1215
                RotateTicker:            ticker.New(discovery.DefaultSyncerRotationInterval),
1216
                HistoricalSyncTicker:    ticker.New(cfg.HistoricalSyncInterval),
1217
                NumActiveSyncers:        cfg.NumGraphSyncPeers,
1218
                NoTimestampQueries:      cfg.ProtocolOptions.NoTimestampQueryOption, //nolint:ll
1219
                MinimumBatchSize:        10,
1220
                SubBatchDelay:           cfg.Gossip.SubBatchDelay,
1221
                IgnoreHistoricalFilters: cfg.IgnoreHistoricalGossipFilters,
1222
                PinnedSyncers:           cfg.Gossip.PinnedSyncers,
1223
                MaxChannelUpdateBurst:   cfg.Gossip.MaxChannelUpdateBurst,
1224
                ChannelUpdateInterval:   cfg.Gossip.ChannelUpdateInterval,
1225
                IsAlias:                 aliasmgr.IsAlias,
1226
                SignAliasUpdate:         s.signAliasUpdate,
1227
                FindBaseByAlias:         s.aliasMgr.FindBaseSCID,
1228
                GetAlias:                s.aliasMgr.GetPeerAlias,
1229
                FindChannel:             s.findChannel,
1230
                IsStillZombieChannel:    s.graphBuilder.IsZombieChannel,
1231
                ScidCloser:              scidCloserMan,
1232
                AssumeChannelValid:      cfg.Routing.AssumeChannelValid,
1233
                MsgRateBytes:            cfg.Gossip.MsgRateBytes,
1234
                MsgBurstBytes:           cfg.Gossip.MsgBurstBytes,
1235
                FilterConcurrency:       cfg.Gossip.FilterConcurrency,
1236
        }, nodeKeyDesc)
1237

        accessCfg := &accessManConfig{
                initAccessPerms: func() (map[string]channeldb.ChanCount,
                        error) {

                        genesisHash := *s.cfg.ActiveNetParams.GenesisHash
                        return s.chanStateDB.FetchPermAndTempPeers(
                                genesisHash[:],
                        )
                },
                shouldDisconnect:   s.authGossiper.ShouldDisconnect,
                maxRestrictedSlots: int64(s.cfg.NumRestrictedSlots),
        }

        peerAccessMan, err := newAccessMan(accessCfg)
        if err != nil {
                return nil, err
        }

        s.peerAccessMan = peerAccessMan

        selfVertex := route.Vertex(nodeKeyDesc.PubKey.SerializeCompressed())
        //nolint:ll
        s.localChanMgr = &localchans.Manager{
                SelfPub:              nodeKeyDesc.PubKey,
                DefaultRoutingPolicy: cc.RoutingPolicy,
                ForAllOutgoingChannels: func(ctx context.Context,
                        cb func(*models.ChannelEdgeInfo,
                                *models.ChannelEdgePolicy) error,
                        reset func()) error {

                        return s.graphDB.ForEachNodeChannel(ctx, selfVertex,
                                func(c *models.ChannelEdgeInfo,
                                        e *models.ChannelEdgePolicy,
                                        _ *models.ChannelEdgePolicy) error {

                                        // NOTE: The invoked callback here may
                                        // receive a nil channel policy.
                                        return cb(c, e)
                                }, reset,
                        )
                },
                PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate,
                UpdateForwardingPolicies:  s.htlcSwitch.UpdateForwardingPolicies,
                FetchChannel:              s.chanStateDB.FetchChannel,
                AddEdge: func(ctx context.Context,
                        edge *models.ChannelEdgeInfo) error {

                        return s.graphBuilder.AddEdge(ctx, edge)
                },
        }

        utxnStore, err := contractcourt.NewNurseryStore(
                s.cfg.ActiveNetParams.GenesisHash, dbs.ChanStateDB,
        )
        if err != nil {
                srvrLog.Errorf("unable to create nursery store: %v", err)
                return nil, err
        }

        sweeperStore, err := sweep.NewSweeperStore(
                dbs.ChanStateDB, s.cfg.ActiveNetParams.GenesisHash,
        )
        if err != nil {
                srvrLog.Errorf("unable to create sweeper store: %v", err)
                return nil, err
        }

        aggregator := sweep.NewBudgetAggregator(
                cc.FeeEstimator, sweep.DefaultMaxInputsPerTx,
                s.implCfg.AuxSweeper,
        )

        s.txPublisher = sweep.NewTxPublisher(sweep.TxPublisherConfig{
                Signer:     cc.Wallet.Cfg.Signer,
                Wallet:     cc.Wallet,
                Estimator:  cc.FeeEstimator,
                Notifier:   cc.ChainNotifier,
                AuxSweeper: s.implCfg.AuxSweeper,
        })

        s.sweeper = sweep.New(&sweep.UtxoSweeperConfig{
                FeeEstimator: cc.FeeEstimator,
                GenSweepScript: newSweepPkScriptGen(
                        cc.Wallet, s.cfg.ActiveNetParams.Params,
                ),
                Signer:               cc.Wallet.Cfg.Signer,
                Wallet:               newSweeperWallet(cc.Wallet),
                Mempool:              cc.MempoolNotifier,
                Notifier:             cc.ChainNotifier,
                Store:                sweeperStore,
                MaxInputsPerTx:       sweep.DefaultMaxInputsPerTx,
                MaxFeeRate:           cfg.Sweeper.MaxFeeRate,
                Aggregator:           aggregator,
                Publisher:            s.txPublisher,
                NoDeadlineConfTarget: cfg.Sweeper.NoDeadlineConfTarget,
        })

        s.utxoNursery = contractcourt.NewUtxoNursery(&contractcourt.NurseryConfig{
                ChainIO:             cc.ChainIO,
                ConfDepth:           1,
                FetchClosedChannels: s.chanStateDB.FetchClosedChannels,
                FetchClosedChannel:  s.chanStateDB.FetchClosedChannel,
                Notifier:            cc.ChainNotifier,
                PublishTransaction:  cc.Wallet.PublishTransaction,
                Store:               utxnStore,
                SweepInput:          s.sweeper.SweepInput,
                Budget:              s.cfg.Sweeper.Budget,
        })

        // Construct a closure that wraps the htlcswitch's CloseLink method.
        closeLink := func(chanPoint *wire.OutPoint,
                closureType contractcourt.ChannelCloseType) {
                // TODO(conner): Properly respect the update and error channels
                // returned by CloseLink.

                // Instruct the switch to close the channel. Provide no close
                // out delivery script or target fee per kw because user input
                // is not available when the remote peer closes the channel.
                s.htlcSwitch.CloseLink(
                        context.Background(), chanPoint, closureType, 0, 0, nil,
                )
        }

        // We will use the following channel to reliably hand off contract
        // breach events from the ChannelArbitrator to the BreachArbitrator.
        contractBreaches := make(chan *contractcourt.ContractBreachEvent, 1)

        s.breachArbitrator = contractcourt.NewBreachArbitrator(
                &contractcourt.BreachConfig{
                        CloseLink: closeLink,
                        DB:        s.chanStateDB,
                        Estimator: s.cc.FeeEstimator,
                        GenSweepScript: newSweepPkScriptGen(
                                cc.Wallet, s.cfg.ActiveNetParams.Params,
                        ),
                        Notifier:           cc.ChainNotifier,
                        PublishTransaction: cc.Wallet.PublishTransaction,
                        ContractBreaches:   contractBreaches,
                        Signer:             cc.Wallet.Cfg.Signer,
                        Store: contractcourt.NewRetributionStore(
                                dbs.ChanStateDB,
                        ),
                        AuxSweeper: s.implCfg.AuxSweeper,
                },
        )

        //nolint:ll
        s.chainArb = contractcourt.NewChainArbitrator(contractcourt.ChainArbitratorConfig{
                ChainHash:              *s.cfg.ActiveNetParams.GenesisHash,
                IncomingBroadcastDelta: lncfg.DefaultIncomingBroadcastDelta,
                OutgoingBroadcastDelta: lncfg.DefaultOutgoingBroadcastDelta,
                NewSweepAddr: func() ([]byte, error) {
                        addr, err := newSweepPkScriptGen(
                                cc.Wallet, netParams,
                        )().Unpack()
                        if err != nil {
                                return nil, err
                        }

                        return addr.DeliveryAddress, nil
                },
                PublishTx: cc.Wallet.PublishTransaction,
                DeliverResolutionMsg: func(msgs ...contractcourt.ResolutionMsg) error {
                        for _, msg := range msgs {
                                err := s.htlcSwitch.ProcessContractResolution(msg)
                                if err != nil {
                                        return err
                                }
                        }
                        return nil
                },
                IncubateOutputs: func(chanPoint wire.OutPoint,
                        outHtlcRes fn.Option[lnwallet.OutgoingHtlcResolution],
                        inHtlcRes fn.Option[lnwallet.IncomingHtlcResolution],
                        broadcastHeight uint32,
                        deadlineHeight fn.Option[int32]) error {

                        return s.utxoNursery.IncubateOutputs(
                                chanPoint, outHtlcRes, inHtlcRes,
                                broadcastHeight, deadlineHeight,
                        )
                },
                PreimageDB:   s.witnessBeacon,
                Notifier:     cc.ChainNotifier,
                Mempool:      cc.MempoolNotifier,
                Signer:       cc.Wallet.Cfg.Signer,
                FeeEstimator: cc.FeeEstimator,
                ChainIO:      cc.ChainIO,
                MarkLinkInactive: func(chanPoint wire.OutPoint) error {
                        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
                        s.htlcSwitch.RemoveLink(chanID)
                        return nil
                },
                IsOurAddress: cc.Wallet.IsOurAddress,
                ContractBreach: func(chanPoint wire.OutPoint,
                        breachRet *lnwallet.BreachRetribution) error {

                        // processACK will handle the BreachArbitrator ACKing
                        // the event.
                        finalErr := make(chan error, 1)
                        processACK := func(brarErr error) {
                                if brarErr != nil {
                                        finalErr <- brarErr
                                        return
                                }

                                // If the BreachArbitrator successfully handled
                                // the event, we can signal that the handoff
                                // was successful.
                                finalErr <- nil
                        }

                        event := &contractcourt.ContractBreachEvent{
                                ChanPoint:         chanPoint,
                                ProcessACK:        processACK,
                                BreachRetribution: breachRet,
                        }

                        // Send the contract breach event to the
                        // BreachArbitrator.
                        select {
                        case contractBreaches <- event:
                        case <-s.quit:
                                return ErrServerShuttingDown
                        }

                        // We'll wait for a final error to be available from
                        // the BreachArbitrator.
                        select {
                        case err := <-finalErr:
                                return err
                        case <-s.quit:
                                return ErrServerShuttingDown
                        }
                },
                DisableChannel: func(chanPoint wire.OutPoint) error {
                        return s.chanStatusMgr.RequestDisable(chanPoint, false)
                },
                Sweeper:                       s.sweeper,
                Registry:                      s.invoices,
                NotifyClosedChannel:           s.channelNotifier.NotifyClosedChannelEvent,
                NotifyFullyResolvedChannel:    s.channelNotifier.NotifyFullyResolvedChannelEvent,
                OnionProcessor:                s.sphinx,
                PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod,
                IsForwardedHTLC:               s.htlcSwitch.IsForwardedHTLC,
                Clock:                         clock.NewDefaultClock(),
                SubscribeBreachComplete:       s.breachArbitrator.SubscribeBreachComplete,
                PutFinalHtlcOutcome:           s.chanStateDB.PutOnchainFinalHtlcOutcome,
                HtlcNotifier:                  s.htlcNotifier,
                Budget:                        *s.cfg.Sweeper.Budget,

                // TODO(yy): remove this hack once PaymentCircuit is interfaced.
                QueryIncomingCircuit: func(
                        circuit models.CircuitKey) *models.CircuitKey {

                        // Get the circuit map.
                        circuits := s.htlcSwitch.CircuitLookup()

                        // Lookup the outgoing circuit.
                        pc := circuits.LookupOpenCircuit(circuit)
                        if pc == nil {
                                return nil
                        }

                        return &pc.Incoming
                },
                AuxLeafStore: implCfg.AuxLeafStore,
                AuxSigner:    implCfg.AuxSigner,
                AuxResolver:  implCfg.AuxContractResolver,
        }, dbs.ChanStateDB)

        // Select the configuration and funding parameters for Bitcoin.
        chainCfg := cfg.Bitcoin
        minRemoteDelay := funding.MinBtcRemoteDelay
        maxRemoteDelay := funding.MaxBtcRemoteDelay

        var chanIDSeed [32]byte
        if _, err := rand.Read(chanIDSeed[:]); err != nil {
                return nil, err
        }

        // Wrap the DeleteChannelEdges method so that the funding manager can
        // use it without depending on several layers of indirection.
        deleteAliasEdge := func(scid lnwire.ShortChannelID) (
                *models.ChannelEdgePolicy, error) {

                info, e1, e2, err := s.graphDB.FetchChannelEdgesByID(
                        scid.ToUint64(),
                )
                if errors.Is(err, graphdb.ErrEdgeNotFound) {
                        // This is unlikely but there is a slim chance of this
                        // being hit if lnd was killed via SIGKILL and the
                        // funding manager was stepping through the delete
                        // alias edge logic.
                        return nil, nil
                } else if err != nil {
                        return nil, err
                }

                // Grab our key to find our policy.
                var ourKey [33]byte
                copy(ourKey[:], nodeKeyDesc.PubKey.SerializeCompressed())

                var ourPolicy *models.ChannelEdgePolicy
                if info != nil && info.NodeKey1Bytes == ourKey {
                        ourPolicy = e1
                } else {
                        ourPolicy = e2
                }

                if ourPolicy == nil {
                        // Something is wrong, so return an error.
                        return nil, fmt.Errorf("we don't have an edge")
                }

                err = s.graphDB.DeleteChannelEdges(
                        false, false, scid.ToUint64(),
                )
                return ourPolicy, err
        }

        // For the reservationTimeout and the zombieSweeperInterval, different
        // values are set in case we are in a dev environment, in order to
        // enhance testing capabilities.
        reservationTimeout := chanfunding.DefaultReservationTimeout
        zombieSweeperInterval := lncfg.DefaultZombieSweeperInterval

        // Get the development config for the funding manager. If we are not
        // in development mode, this will be nil.
        var devCfg *funding.DevConfig
        if lncfg.IsDevBuild() {
                devCfg = &funding.DevConfig{
                        ProcessChannelReadyWait: cfg.Dev.ChannelReadyWait(),
                        MaxWaitNumBlocksFundingConf: cfg.Dev.
                                GetMaxWaitNumBlocksFundingConf(),
                }

                reservationTimeout = cfg.Dev.GetReservationTimeout()
                zombieSweeperInterval = cfg.Dev.GetZombieSweeperInterval()

                srvrLog.Debugf("Using the dev config for the fundingMgr: %v, "+
                        "reservationTimeout=%v, zombieSweeperInterval=%v",
                        devCfg, reservationTimeout, zombieSweeperInterval)
        }

        //nolint:ll
        s.fundingMgr, err = funding.NewFundingManager(funding.Config{
                Dev:                devCfg,
                NoWumboChans:       !cfg.ProtocolOptions.Wumbo(),
                IDKey:              nodeKeyDesc.PubKey,
                IDKeyLoc:           nodeKeyDesc.KeyLocator,
                Wallet:             cc.Wallet,
                PublishTransaction: cc.Wallet.PublishTransaction,
                UpdateLabel: func(hash chainhash.Hash, label string) error {
                        return cc.Wallet.LabelTransaction(hash, label, true)
                },
                Notifier:     cc.ChainNotifier,
                ChannelDB:    s.chanStateDB,
                FeeEstimator: cc.FeeEstimator,
                SignMessage:  cc.MsgSigner.SignMessage,
                CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement,
                        error) {

                        return s.genNodeAnnouncement(nil)
                },
                SendAnnouncement:     s.authGossiper.ProcessLocalAnnouncement,
                NotifyWhenOnline:     s.NotifyWhenOnline,
                TempChanIDSeed:       chanIDSeed,
                FindChannel:          s.findChannel,
                DefaultRoutingPolicy: cc.RoutingPolicy,
                DefaultMinHtlcIn:     cc.MinHtlcIn,
                NumRequiredConfs: func(chanAmt btcutil.Amount,
                        pushAmt lnwire.MilliSatoshi) uint16 {
                        // For large channels we increase the number of
                        // confirmations we require for the channel to be
                        // considered open. As it is always the responder that
                        // gets to choose this value, the pushAmt is the value
                        // being pushed to us. This means we have more to lose
                        // in the case this gets re-orged out, and we will
                        // require more confirmations before we consider it
                        // open.

                        // In case the user has explicitly specified a default
                        // value for the number of confirmations, we use it.
                        defaultConf := uint16(chainCfg.DefaultNumChanConfs)
                        if defaultConf != 0 {
                                return defaultConf
                        }

                        minConf := uint64(3)
                        maxConf := uint64(6)

                        // If this is a wumbo channel, then we'll require the
                        // max amount of confirmations.
                        if chanAmt > MaxFundingAmount {
                                return uint16(maxConf)
                        }

                        // If not, we return a value scaled linearly between 3
                        // and 6, depending on channel size.
                        // TODO(halseth): Use 1 as minimum?
                        maxChannelSize := uint64(
                                lnwire.NewMSatFromSatoshis(MaxFundingAmount))
                        stake := lnwire.NewMSatFromSatoshis(chanAmt) + pushAmt
                        conf := maxConf * uint64(stake) / maxChannelSize
                        if conf < minConf {
                                conf = minConf
                        }
                        if conf > maxConf {
                                conf = maxConf
                        }
                        return uint16(conf)
                },
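                // Editor's note: a rough worked example of the scaling above,
                // assuming wumbo is disabled so that MaxFundingAmount is the
                // BOLT-2 cap of 2^24 sats (~0.167 BTC). A half-cap channel
                // with no push amount yields conf = 6 * 0.5 = 3, i.e. the
                // minimum, while a channel at the cap requires the full 6
                // confirmations.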
                RequiredRemoteDelay: func(chanAmt btcutil.Amount) uint16 {
                        // We scale the remote CSV delay (the time the remote
                        // party has to claim funds in case of a unilateral
                        // close) linearly from minRemoteDelay blocks for
                        // small channels, to maxRemoteDelay blocks for
                        // channels of size MaxFundingAmount.

                        // In case the user has explicitly specified a default
                        // value for the remote delay, we use it.
                        defaultDelay := uint16(chainCfg.DefaultRemoteDelay)
                        if defaultDelay > 0 {
                                return defaultDelay
                        }

                        // If this is a wumbo channel, then we'll require the
                        // max value.
                        if chanAmt > MaxFundingAmount {
                                return maxRemoteDelay
                        }

                        // If not, we scale according to channel size.
                        delay := uint16(btcutil.Amount(maxRemoteDelay) *
                                chanAmt / MaxFundingAmount)
                        if delay < minRemoteDelay {
                                delay = minRemoteDelay
                        }
                        if delay > maxRemoteDelay {
                                delay = maxRemoteDelay
                        }
                        return delay
                },
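                // Editor's note: a hypothetical worked example, assuming the
                // Bitcoin defaults of funding.MinBtcRemoteDelay = 144 and
                // funding.MaxBtcRemoteDelay = 2016 blocks: a channel at a
                // quarter of MaxFundingAmount gets delay = 2016 * 0.25 = 504
                // blocks (roughly 3.5 days), well within the [144, 2016]
                // clamp applied above.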
                WatchNewChannel: func(channel *channeldb.OpenChannel,
                        peerKey *btcec.PublicKey) error {

                        // First, we'll mark this new peer as a persistent peer
                        // for re-connection purposes. If the peer is not yet
                        // tracked or the user hasn't requested it to be perm,
                        // we'll set false to prevent the server from
                        // continuing to connect to this peer even if the
                        // number of channels with this peer is zero.
                        s.mu.Lock()
                        pubStr := string(peerKey.SerializeCompressed())
                        if _, ok := s.persistentPeers[pubStr]; !ok {
                                s.persistentPeers[pubStr] = false
                        }
                        s.mu.Unlock()

                        // With that taken care of, we'll send this channel to
                        // the chain arb so it can react to on-chain events.
                        return s.chainArb.WatchNewChannel(channel)
                },
                ReportShortChanID: func(chanPoint wire.OutPoint) error {
                        cid := lnwire.NewChanIDFromOutPoint(chanPoint)
                        return s.htlcSwitch.UpdateShortChanID(cid)
                },
                RequiredRemoteChanReserve: func(chanAmt,
                        dustLimit btcutil.Amount) btcutil.Amount {

                        // By default, we'll require the remote peer to
                        // maintain at least 1% of the total channel capacity
                        // at all times. If this value ends up dipping below
                        // the dust limit, then we'll use the dust limit itself
                        // as the reserve as required by BOLT #2.
                        reserve := chanAmt / 100
                        if reserve < dustLimit {
                                reserve = dustLimit
                        }

                        return reserve
                },
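                // Editor's note: for example, a 1,000,000 sat channel requires
                // a 10,000 sat remote reserve; with a hypothetical dust limit
                // of 546 sats, only channels below 54,600 sats would fall back
                // to the dust limit instead.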
                RequiredRemoteMaxValue: func(chanAmt btcutil.Amount) lnwire.MilliSatoshi {
                        // By default, we'll allow the remote peer to fully
                        // utilize the full bandwidth of the channel, minus our
                        // required reserve.
                        reserve := lnwire.NewMSatFromSatoshis(chanAmt / 100)
                        return lnwire.NewMSatFromSatoshis(chanAmt) - reserve
                },
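                // Editor's note: continuing the example above, a 1,000,000 sat
                // channel caps the remote's in-flight value at 990,000,000
                // msat, i.e. the capacity minus the 1% reserve.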
                RequiredRemoteMaxHTLCs: func(chanAmt btcutil.Amount) uint16 {
                        if cfg.DefaultRemoteMaxHtlcs > 0 {
                                return cfg.DefaultRemoteMaxHtlcs
                        }

                        // By default, we'll permit them to utilize the full
                        // channel bandwidth.
                        return uint16(input.MaxHTLCNumber / 2)
                },
                ZombieSweeperInterval:         zombieSweeperInterval,
                ReservationTimeout:            reservationTimeout,
                MinChanSize:                   btcutil.Amount(cfg.MinChanSize),
                MaxChanSize:                   btcutil.Amount(cfg.MaxChanSize),
                MaxPendingChannels:            cfg.MaxPendingChannels,
                RejectPush:                    cfg.RejectPush,
                MaxLocalCSVDelay:              chainCfg.MaxLocalDelay,
                NotifyOpenChannelEvent:        s.notifyOpenChannelPeerEvent,
                OpenChannelPredicate:          chanPredicate,
                NotifyPendingOpenChannelEvent: s.notifyPendingOpenChannelPeerEvent,
                NotifyFundingTimeout:          s.notifyFundingTimeoutPeerEvent,
                EnableUpfrontShutdown:         cfg.EnableUpfrontShutdown,
                MaxAnchorsCommitFeeRate: chainfee.SatPerKVByte(
                        s.cfg.MaxCommitFeeRateAnchors * 1000).FeePerKWeight(),
                DeleteAliasEdge:      deleteAliasEdge,
                AliasManager:         s.aliasMgr,
                IsSweeperOutpoint:    s.sweeper.IsSweeperOutpoint,
                AuxFundingController: implCfg.AuxFundingController,
                AuxSigner:            implCfg.AuxSigner,
                AuxResolver:          implCfg.AuxContractResolver,
        })
        if err != nil {
                return nil, err
        }

        // Next, we'll assemble the sub-system that will maintain an on-disk
        // static backup of the latest channel state.
        chanNotifier := &channelNotifier{
                chanNotifier: s.channelNotifier,
                addrs:        s.addrSource,
        }
        backupFile := chanbackup.NewMultiFile(
                cfg.BackupFilePath, cfg.NoBackupArchive,
        )
        startingChans, err := chanbackup.FetchStaticChanBackups(
                ctx, s.chanStateDB, s.addrSource,
        )
        if err != nil {
                return nil, err
        }
        s.chanSubSwapper, err = chanbackup.NewSubSwapper(
                ctx, startingChans, chanNotifier, s.cc.KeyRing, backupFile,
        )
        if err != nil {
                return nil, err
        }

        // Assemble a peer notifier which will provide clients with
        // subscriptions to peer online and offline events.
        s.peerNotifier = peernotifier.New()

        // Create a channel event store which monitors all open channels.
        s.chanEventStore = chanfitness.NewChannelEventStore(&chanfitness.Config{
                SubscribeChannelEvents: func() (subscribe.Subscription, error) {
                        return s.channelNotifier.SubscribeChannelEvents()
                },
                SubscribePeerEvents: func() (subscribe.Subscription, error) {
                        return s.peerNotifier.SubscribePeerEvents()
                },
                GetOpenChannels: s.chanStateDB.FetchAllOpenChannels,
                Clock:           clock.NewDefaultClock(),
                ReadFlapCount:   s.miscDB.ReadFlapCount,
                WriteFlapCount:  s.miscDB.WriteFlapCounts,
                FlapCountTicker: ticker.New(chanfitness.FlapCountFlushRate),
        })

        if cfg.WtClient.Active {
                policy := wtpolicy.DefaultPolicy()
                policy.MaxUpdates = cfg.WtClient.MaxUpdates

                // We expose the sweep fee rate in sat/vbyte, but the tower
                // protocol operates on sat/kw.
                sweepRateSatPerVByte := chainfee.SatPerKVByte(
                        1000 * cfg.WtClient.SweepFeeRate,
                )

                policy.SweepFeeRate = sweepRateSatPerVByte.FeePerKWeight()
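
                // Editor's note: e.g. a configured 10 sat/vbyte becomes
                // 10,000 sat/kvbyte above, and FeePerKWeight divides by the
                // four weight units per vbyte to give 2,500 sat/kw.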

                if err := policy.Validate(); err != nil {
                        return nil, err
                }

                // authDial is the wrapper around brontide.Dial for the
                // watchtower.
                authDial := func(localKey keychain.SingleKeyECDH,
                        netAddr *lnwire.NetAddress,
                        dialer tor.DialFunc) (wtserver.Peer, error) {

                        return brontide.Dial(
                                localKey, netAddr, cfg.ConnectionTimeout, dialer,
                        )
                }

                // buildBreachRetribution is a call-back that can be used to
                // query the BreachRetribution info and channel type given a
                // channel ID and commitment height.
                buildBreachRetribution := func(chanID lnwire.ChannelID,
                        commitHeight uint64) (*lnwallet.BreachRetribution,
                        channeldb.ChannelType, error) {

                        channel, err := s.chanStateDB.FetchChannelByID(
                                nil, chanID,
                        )
                        if err != nil {
                                return nil, 0, err
                        }

                        br, err := lnwallet.NewBreachRetribution(
                                channel, commitHeight, 0, nil,
                                implCfg.AuxLeafStore,
                                implCfg.AuxContractResolver,
                        )
                        if err != nil {
                                return nil, 0, err
                        }

                        return br, channel.ChanType, nil
                }

                fetchClosedChannel := s.chanStateDB.FetchClosedChannelForID

                // Copy the policy for legacy channels and set the blob flag
                // signalling support for anchor channels.
                anchorPolicy := policy
                anchorPolicy.BlobType |= blob.Type(blob.FlagAnchorChannel)

                // Copy the policy for legacy channels and set the blob flag
                // signalling support for taproot channels.
                taprootPolicy := policy
                taprootPolicy.TxPolicy.BlobType |= blob.Type(
                        blob.FlagTaprootChannel,
                )

                s.towerClientMgr, err = wtclient.NewManager(&wtclient.Config{
                        FetchClosedChannel:     fetchClosedChannel,
                        BuildBreachRetribution: buildBreachRetribution,
                        SessionCloseRange:      cfg.WtClient.SessionCloseRange,
                        ChainNotifier:          s.cc.ChainNotifier,
                        SubscribeChannelEvents: func() (subscribe.Subscription,
                                error) {

                                return s.channelNotifier.
                                        SubscribeChannelEvents()
                        },
                        Signer: cc.Wallet.Cfg.Signer,
                        NewAddress: func() ([]byte, error) {
                                addr, err := newSweepPkScriptGen(
                                        cc.Wallet, netParams,
                                )().Unpack()
                                if err != nil {
                                        return nil, err
                                }

                                return addr.DeliveryAddress, nil
                        },
                        SecretKeyRing:      s.cc.KeyRing,
                        Dial:               cfg.net.Dial,
                        AuthDial:           authDial,
                        DB:                 dbs.TowerClientDB,
                        ChainHash:          *s.cfg.ActiveNetParams.GenesisHash,
                        MinBackoff:         10 * time.Second,
                        MaxBackoff:         5 * time.Minute,
                        MaxTasksInMemQueue: cfg.WtClient.MaxTasksInMemQueue,
                }, policy, anchorPolicy, taprootPolicy)
                if err != nil {
                        return nil, err
                }
        }

        if len(cfg.ExternalHosts) != 0 {
                advertisedIPs := make(map[string]struct{})
                for _, addr := range s.currentNodeAnn.Addresses {
                        advertisedIPs[addr.String()] = struct{}{}
                }

                s.hostAnn = netann.NewHostAnnouncer(netann.HostAnnouncerConfig{
                        Hosts:         cfg.ExternalHosts,
                        RefreshTicker: ticker.New(defaultHostSampleInterval),
                        LookupHost: func(host string) (net.Addr, error) {
                                return lncfg.ParseAddressString(
                                        host, strconv.Itoa(defaultPeerPort),
                                        cfg.net.ResolveTCPAddr,
                                )
                        },
                        AdvertisedIPs: advertisedIPs,
                        AnnounceNewIPs: netann.IPAnnouncer(
                                func(modifier ...netann.NodeAnnModifier) (
                                        lnwire.NodeAnnouncement, error) {

                                        return s.genNodeAnnouncement(
                                                nil, modifier...,
                                        )
                                }),
                })
        }

        // Create the liveness monitor.
        s.createLivenessMonitor(cfg, cc, leaderElector)

        listeners := make([]net.Listener, len(listenAddrs))
        for i, listenAddr := range listenAddrs {
                // Note: though brontide.NewListener uses ResolveTCPAddr, it
                // doesn't need to call the general lndResolveTCP function
                // since we are resolving a local address.

                // RESOLVE: We are actually partially accepting inbound
                // connection requests when we call NewListener.
                listeners[i], err = brontide.NewListener(
                        nodeKeyECDH, listenAddr.String(),
                        // TODO(yy): remove this check and unify the inbound
                        // connection check inside `InboundPeerConnected`.
                        s.peerAccessMan.checkAcceptIncomingConn,
                )
                if err != nil {
                        return nil, err
                }
        }

        // Create the connection manager which will be responsible for
        // maintaining persistent outbound connections and also accepting new
        // incoming connections.
        cmgr, err := connmgr.New(&connmgr.Config{
                Listeners:      listeners,
                OnAccept:       s.InboundPeerConnected,
                RetryDuration:  time.Second * 5,
                TargetOutbound: 100,
                Dial: noiseDial(
                        nodeKeyECDH, s.cfg.net, s.cfg.ConnectionTimeout,
                ),
                OnConnection: s.OutboundPeerConnected,
        })
        if err != nil {
                return nil, err
        }
        s.connMgr = cmgr

        // Finally, register the subsystems in blockbeat.
        s.registerBlockConsumers()

        return s, nil
}

// UpdateRoutingConfig is a callback function to update the routing config
// values in the main cfg.
func (s *server) UpdateRoutingConfig(cfg *routing.MissionControlConfig) {
        routerCfg := s.cfg.SubRPCServers.RouterRPC

        switch c := cfg.Estimator.Config().(type) {
        case routing.AprioriConfig:
                routerCfg.ProbabilityEstimatorType =
                        routing.AprioriEstimatorName

                targetCfg := routerCfg.AprioriConfig
                targetCfg.PenaltyHalfLife = c.PenaltyHalfLife
                targetCfg.Weight = c.AprioriWeight
                targetCfg.CapacityFraction = c.CapacityFraction
                targetCfg.HopProbability = c.AprioriHopProbability

        case routing.BimodalConfig:
                routerCfg.ProbabilityEstimatorType =
                        routing.BimodalEstimatorName

                targetCfg := routerCfg.BimodalConfig
                targetCfg.Scale = int64(c.BimodalScaleMsat)
                targetCfg.NodeWeight = c.BimodalNodeWeight
                targetCfg.DecayTime = c.BimodalDecayTime
        }

        routerCfg.MaxMcHistory = cfg.MaxMcHistory
}

// registerBlockConsumers registers the subsystems that consume block events.
// By calling `RegisterQueue`, a list of subsystems are registered in the
// blockbeat for block notifications. When a new block arrives, the subsystems
// in the same queue are notified sequentially, and different queues are
// notified concurrently.
//
// NOTE: To put a subsystem in a different queue, create a slice and pass it to
// a new `RegisterQueue` call.
func (s *server) registerBlockConsumers() {
        // In this queue, when a new block arrives, it will be received and
        // processed in this order: chainArb -> sweeper -> txPublisher.
        consumers := []chainio.Consumer{
                s.chainArb,
                s.sweeper,
                s.txPublisher,
        }
        s.blockbeatDispatcher.RegisterQueue(consumers)
}
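
// Editor's note: a hypothetical sketch of the NOTE above. To have another
// consumer notified concurrently with the queue registered in
// registerBlockConsumers, it would be registered in its own queue:
//
//	s.blockbeatDispatcher.RegisterQueue(
//		[]chainio.Consumer{someOtherConsumer},
//	)
//
// where someOtherConsumer stands in for any chainio.Consumer implementation.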

// signAliasUpdate takes a ChannelUpdate and returns the signature. This is
// used for option_scid_alias channels where the ChannelUpdate to be sent back
// may differ from what is on disk.
func (s *server) signAliasUpdate(u *lnwire.ChannelUpdate1) (*ecdsa.Signature,
        error) {

        data, err := u.DataToSign()
        if err != nil {
                return nil, err
        }

        return s.cc.MsgSigner.SignMessage(s.identityKeyLoc, data, true)
}

// createLivenessMonitor creates a set of health checks using our configured
// values and uses these checks to create a liveness monitor. Available
// health checks,
//   - chainHealthCheck (will be disabled for --nochainbackend mode)
//   - diskCheck
//   - tlsHealthCheck
//   - torController, only created when tor is enabled.
//
// If a health check has been disabled by setting attempts to 0, our monitor
// will not run it.
func (s *server) createLivenessMonitor(cfg *Config, cc *chainreg.ChainControl,
        leaderElector cluster.LeaderElector) {

        chainBackendAttempts := cfg.HealthChecks.ChainCheck.Attempts
        if cfg.Bitcoin.Node == "nochainbackend" {
                srvrLog.Info("Disabling chain backend checks for " +
                        "nochainbackend mode")

                chainBackendAttempts = 0
        }

        chainHealthCheck := healthcheck.NewObservation(
                "chain backend",
                cc.HealthCheck,
                cfg.HealthChecks.ChainCheck.Interval,
                cfg.HealthChecks.ChainCheck.Timeout,
                cfg.HealthChecks.ChainCheck.Backoff,
                chainBackendAttempts,
        )

        diskCheck := healthcheck.NewObservation(
                "disk space",
                func() error {
                        free, err := healthcheck.AvailableDiskSpaceRatio(
                                cfg.LndDir,
                        )
                        if err != nil {
                                return err
                        }

                        // If we have more free space than we require,
                        // we return a nil error.
                        if free > cfg.HealthChecks.DiskCheck.RequiredRemaining {
                                return nil
                        }

                        return fmt.Errorf("require: %v free space, got: %v",
                                cfg.HealthChecks.DiskCheck.RequiredRemaining,
                                free)
                },
                cfg.HealthChecks.DiskCheck.Interval,
                cfg.HealthChecks.DiskCheck.Timeout,
                cfg.HealthChecks.DiskCheck.Backoff,
                cfg.HealthChecks.DiskCheck.Attempts,
        )

        tlsHealthCheck := healthcheck.NewObservation(
                "tls",
                func() error {
                        expired, expTime, err := s.tlsManager.IsCertExpired(
                                s.cc.KeyRing,
                        )
                        if err != nil {
                                return err
                        }
                        if expired {
                                return fmt.Errorf("TLS certificate is "+
                                        "expired as of %v", expTime)
                        }

                        // If the certificate is not outdated, no error needs
                        // to be returned.
                        return nil
                },
                cfg.HealthChecks.TLSCheck.Interval,
                cfg.HealthChecks.TLSCheck.Timeout,
                cfg.HealthChecks.TLSCheck.Backoff,
                cfg.HealthChecks.TLSCheck.Attempts,
        )

        checks := []*healthcheck.Observation{
                chainHealthCheck, diskCheck, tlsHealthCheck,
        }

        // If Tor is enabled, add the healthcheck for the tor connection.
        if s.torController != nil {
                torConnectionCheck := healthcheck.NewObservation(
                        "tor connection",
                        func() error {
                                return healthcheck.CheckTorServiceStatus(
                                        s.torController,
                                        func() error {
                                                return s.createNewHiddenService(
                                                        context.TODO(),
                                                )
                                        },
                                )
                        },
                        cfg.HealthChecks.TorConnection.Interval,
                        cfg.HealthChecks.TorConnection.Timeout,
                        cfg.HealthChecks.TorConnection.Backoff,
                        cfg.HealthChecks.TorConnection.Attempts,
                )
                checks = append(checks, torConnectionCheck)
        }

        // If remote signing is enabled, add the healthcheck for the remote
        // signing RPC interface.
        if s.cfg.RemoteSigner != nil && s.cfg.RemoteSigner.Enable {
                // Because we have two cascading timeouts here, we need to add
                // some slack to the "outer" one of them in case the "inner"
                // returns exactly on time.
                overhead := time.Millisecond * 10

                remoteSignerConnectionCheck := healthcheck.NewObservation(
                        "remote signer connection",
                        rpcwallet.HealthCheck(
                                s.cfg.RemoteSigner,

                                // For the health check we might want to be
                                // even stricter than for the initial/normal
                                // connect, so we use the health check timeout
                                // here.
                                cfg.HealthChecks.RemoteSigner.Timeout,
                        ),
                        cfg.HealthChecks.RemoteSigner.Interval,
                        cfg.HealthChecks.RemoteSigner.Timeout+overhead,
                        cfg.HealthChecks.RemoteSigner.Backoff,
                        cfg.HealthChecks.RemoteSigner.Attempts,
                )
                checks = append(checks, remoteSignerConnectionCheck)
        }

        // If we have a leader elector, we add a health check to ensure we are
        // still the leader. During normal operation, we should always be the
        // leader, but there are circumstances where this may change, such as
        // when we lose network connectivity for long enough to expire our
        // lease.
        if leaderElector != nil {
                leaderCheck := healthcheck.NewObservation(
                        "leader status",
                        func() error {
                                // Check if we are still the leader. Note that
                                // the healthcheck observer handles the overall
                                // timeout case for us; the context here simply
                                // bounds the IsLeader call itself.
                                timeoutCtx, cancel := context.WithTimeout(
                                        context.Background(),
                                        cfg.HealthChecks.LeaderCheck.Timeout,
                                )
                                defer cancel()

                                leader, err := leaderElector.IsLeader(
                                        timeoutCtx,
                                )
                                if err != nil {
                                        return fmt.Errorf("unable to check if "+
                                                "still leader: %v", err)
                                }

                                if !leader {
                                        srvrLog.Debug("Not the current leader")
                                        return fmt.Errorf("not the current " +
                                                "leader")
                                }

                                return nil
                        },
                        cfg.HealthChecks.LeaderCheck.Interval,
                        cfg.HealthChecks.LeaderCheck.Timeout,
                        cfg.HealthChecks.LeaderCheck.Backoff,
                        cfg.HealthChecks.LeaderCheck.Attempts,
                )

                checks = append(checks, leaderCheck)
        }

        // If we have not disabled all of our health checks, we create a
        // liveness monitor with our configured checks.
        s.livenessMonitor = healthcheck.NewMonitor(
                &healthcheck.Config{
                        Checks:   checks,
                        Shutdown: srvrLog.Criticalf,
                },
        )
}

// Started returns true if the server has been started, and false otherwise.
// NOTE: This function is safe for concurrent access.
func (s *server) Started() bool {
        return atomic.LoadInt32(&s.active) != 0
}
2234

2235
// cleaner is used to aggregate "cleanup" functions during an operation that
2236
// starts several subsystems. In case one of the subsystem fails to start
2237
// and a proper resource cleanup is required, the "run" method achieves this
2238
// by running all these added "cleanup" functions.
2239
type cleaner []func() error
2240

2241
// add is used to add a cleanup function to be called when
2242
// the run function is executed.
2243
func (c cleaner) add(cleanup func() error) cleaner {
3✔
2244
        return append(c, cleanup)
3✔
2245
}
3✔
2246

2247
// run is used to run all the previousely added cleanup functions.
2248
func (c cleaner) run() {
×
2249
        for i := len(c) - 1; i >= 0; i-- {
×
2250
                if err := c[i](); err != nil {
×
2251
                        srvrLog.Errorf("Cleanup failed: %v", err)
×
2252
                }
×
2253
        }
2254
}
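
// Example (illustrative): cleanups run in LIFO order, so a dependency
// started first is torn down last. Here db and svc are hypothetical
// subsystems exposing a Stop method.
//
//	c := cleaner{}
//	c = c.add(db.Stop)  // registered first, runs last
//	c = c.add(svc.Stop) // registered last, runs first
//	if startErr != nil {
//	        c.run()
//	}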
2255

2256
// startLowLevelServices starts the low-level services of the server. These
2257
// services must be started successfully before running the main server. The
2258
// services are,
2259
// 1. the chain notifier.
2260
//
2261
// TODO(yy): identify and add more low-level services here.
2262
func (s *server) startLowLevelServices() error {
3✔
2263
        var startErr error
3✔
2264

3✔
2265
        cleanup := cleaner{}
3✔
2266

3✔
2267
        cleanup = cleanup.add(s.cc.ChainNotifier.Stop)
3✔
2268
        if err := s.cc.ChainNotifier.Start(); err != nil {
3✔
2269
                startErr = err
×
2270
        }
×
2271

2272
        if startErr != nil {
3✔
2273
                cleanup.run()
×
2274
        }
×
2275

2276
        return startErr
3✔
2277
}
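
// Note the idiom above: the cleanup for a subsystem is registered *before*
// its Start is called, so a Start that fails halfway still has its partially
// initialized resources released by the subsequent cleanup.run(). Each
// subsystem started below follows the same minimal sketch (sub being a
// hypothetical component with Start/Stop methods):
//
//	cleanup = cleanup.add(sub.Stop)
//	if err := sub.Start(); err != nil {
//	        startErr = err
//	        return
//	}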
2278

2279
// Start starts the main daemon server, all requested listeners, and any helper
2280
// goroutines.
2281
// NOTE: This function is safe for concurrent access.
2282
//
2283
//nolint:funlen
2284
func (s *server) Start(ctx context.Context) error {
3✔
2285
        // Get the current blockbeat.
3✔
2286
        beat, err := s.getStartingBeat()
3✔
2287
        if err != nil {
3✔
2288
                return err
×
2289
        }
×
2290

2291
        var startErr error
3✔
2292

3✔
2293
        // If one subsystem fails to start, the following code ensures that the
3✔
2294
        // previously started ones are stopped. It also ensures a proper wallet
3✔
2295
        // shutdown, which is important for releasing its resources (boltdb, etc.).
3✔
2296
        cleanup := cleaner{}
3✔
2297

3✔
2298
        s.start.Do(func() {
6✔
2299
                cleanup = cleanup.add(s.customMessageServer.Stop)
3✔
2300
                if err := s.customMessageServer.Start(); err != nil {
3✔
2301
                        startErr = err
×
2302
                        return
×
2303
                }
×
2304

2305
                if s.hostAnn != nil {
3✔
2306
                        cleanup = cleanup.add(s.hostAnn.Stop)
×
2307
                        if err := s.hostAnn.Start(); err != nil {
×
2308
                                startErr = err
×
2309
                                return
×
2310
                        }
×
2311
                }
2312

2313
                if s.livenessMonitor != nil {
6✔
2314
                        cleanup = cleanup.add(s.livenessMonitor.Stop)
3✔
2315
                        if err := s.livenessMonitor.Start(); err != nil {
3✔
2316
                                startErr = err
×
2317
                                return
×
2318
                        }
×
2319
                }
2320

2321
                // Start the notification server. This is used so channel
2322
                // management goroutines can be notified when a funding
2323
                // transaction reaches a sufficient number of confirmations, or
2324
                // when the input for the funding transaction is spent in an
2325
                // attempt at an uncooperative close by the counterparty.
2326
                cleanup = cleanup.add(s.sigPool.Stop)
3✔
2327
                if err := s.sigPool.Start(); err != nil {
3✔
2328
                        startErr = err
×
2329
                        return
×
2330
                }
×
2331

2332
                cleanup = cleanup.add(s.writePool.Stop)
3✔
2333
                if err := s.writePool.Start(); err != nil {
3✔
2334
                        startErr = err
×
2335
                        return
×
2336
                }
×
2337

2338
                cleanup = cleanup.add(s.readPool.Stop)
3✔
2339
                if err := s.readPool.Start(); err != nil {
3✔
2340
                        startErr = err
×
2341
                        return
×
2342
                }
×
2343

2344
                cleanup = cleanup.add(s.cc.BestBlockTracker.Stop)
3✔
2345
                if err := s.cc.BestBlockTracker.Start(); err != nil {
3✔
2346
                        startErr = err
×
2347
                        return
×
2348
                }
×
2349

2350
                cleanup = cleanup.add(s.channelNotifier.Stop)
3✔
2351
                if err := s.channelNotifier.Start(); err != nil {
3✔
2352
                        startErr = err
×
2353
                        return
×
2354
                }
×
2355

2356
                cleanup = cleanup.add(func() error {
3✔
2357
                        return s.peerNotifier.Stop()
×
2358
                })
×
2359
                if err := s.peerNotifier.Start(); err != nil {
3✔
2360
                        startErr = err
×
2361
                        return
×
2362
                }
×
2363

2364
                cleanup = cleanup.add(s.htlcNotifier.Stop)
3✔
2365
                if err := s.htlcNotifier.Start(); err != nil {
3✔
2366
                        startErr = err
×
2367
                        return
×
2368
                }
×
2369

2370
                if s.towerClientMgr != nil {
6✔
2371
                        cleanup = cleanup.add(s.towerClientMgr.Stop)
3✔
2372
                        if err := s.towerClientMgr.Start(); err != nil {
3✔
2373
                                startErr = err
×
2374
                                return
×
2375
                        }
×
2376
                }
2377

2378
                cleanup = cleanup.add(s.txPublisher.Stop)
3✔
2379
                if err := s.txPublisher.Start(beat); err != nil {
3✔
2380
                        startErr = err
×
2381
                        return
×
2382
                }
×
2383

2384
                cleanup = cleanup.add(s.sweeper.Stop)
3✔
2385
                if err := s.sweeper.Start(beat); err != nil {
3✔
2386
                        startErr = err
×
2387
                        return
×
2388
                }
×
2389

2390
                cleanup = cleanup.add(s.utxoNursery.Stop)
3✔
2391
                if err := s.utxoNursery.Start(); err != nil {
3✔
2392
                        startErr = err
×
2393
                        return
×
2394
                }
×
2395

2396
                cleanup = cleanup.add(s.breachArbitrator.Stop)
3✔
2397
                if err := s.breachArbitrator.Start(); err != nil {
3✔
2398
                        startErr = err
×
2399
                        return
×
2400
                }
×
2401

2402
                cleanup = cleanup.add(s.fundingMgr.Stop)
3✔
2403
                if err := s.fundingMgr.Start(); err != nil {
3✔
2404
                        startErr = err
×
2405
                        return
×
2406
                }
×
2407

2408
                // htlcSwitch must be started before chainArb since the latter
2409
                // relies on htlcSwitch to deliver resolution messages upon
2410
                // start.
2411
                cleanup = cleanup.add(s.htlcSwitch.Stop)
3✔
2412
                if err := s.htlcSwitch.Start(); err != nil {
3✔
2413
                        startErr = err
×
2414
                        return
×
2415
                }
×
2416

2417
                cleanup = cleanup.add(s.interceptableSwitch.Stop)
3✔
2418
                if err := s.interceptableSwitch.Start(); err != nil {
3✔
2419
                        startErr = err
×
2420
                        return
×
2421
                }
×
2422

2423
                cleanup = cleanup.add(s.invoiceHtlcModifier.Stop)
3✔
2424
                if err := s.invoiceHtlcModifier.Start(); err != nil {
3✔
2425
                        startErr = err
×
2426
                        return
×
2427
                }
×
2428

2429
                cleanup = cleanup.add(s.chainArb.Stop)
3✔
2430
                if err := s.chainArb.Start(beat); err != nil {
3✔
2431
                        startErr = err
×
2432
                        return
×
2433
                }
×
2434

2435
                cleanup = cleanup.add(s.graphDB.Stop)
3✔
2436
                if err := s.graphDB.Start(); err != nil {
3✔
2437
                        startErr = err
×
2438
                        return
×
2439
                }
×
2440

2441
                cleanup = cleanup.add(s.graphBuilder.Stop)
3✔
2442
                if err := s.graphBuilder.Start(); err != nil {
3✔
2443
                        startErr = err
×
2444
                        return
×
2445
                }
×
2446

2447
                cleanup = cleanup.add(s.chanRouter.Stop)
3✔
2448
                if err := s.chanRouter.Start(); err != nil {
3✔
2449
                        startErr = err
×
2450
                        return
×
2451
                }
×
2452
                // The authGossiper depends on the chanRouter and therefore
2453
                // should be started after it.
2454
                cleanup = cleanup.add(s.authGossiper.Stop)
3✔
2455
                if err := s.authGossiper.Start(); err != nil {
3✔
2456
                        startErr = err
×
2457
                        return
×
2458
                }
×
2459

2460
                cleanup = cleanup.add(s.invoices.Stop)
3✔
2461
                if err := s.invoices.Start(); err != nil {
3✔
2462
                        startErr = err
×
2463
                        return
×
2464
                }
×
2465

2466
                cleanup = cleanup.add(s.sphinx.Stop)
3✔
2467
                if err := s.sphinx.Start(); err != nil {
3✔
2468
                        startErr = err
×
2469
                        return
×
2470
                }
×
2471

2472
                cleanup = cleanup.add(s.chanStatusMgr.Stop)
3✔
2473
                if err := s.chanStatusMgr.Start(); err != nil {
3✔
2474
                        startErr = err
×
2475
                        return
×
2476
                }
×
2477

2478
                cleanup = cleanup.add(s.chanEventStore.Stop)
3✔
2479
                if err := s.chanEventStore.Start(); err != nil {
3✔
2480
                        startErr = err
×
2481
                        return
×
2482
                }
×
2483

2484
                cleanup = cleanup.add(func() error {
3✔
2485
                        s.missionController.StopStoreTickers()
×
2486
                        return nil
×
2487
                })
×
2488
                s.missionController.RunStoreTickers()
3✔
2489

3✔
2490
                // Before we start the connMgr, we'll check to see if we have
3✔
2491
                // any backups to recover. We do this now as we want to ensure
3✔
2492
                // that we have all the information we need to handle channel
3✔
2493
                // recovery _before_ we even accept connections from any peers.
3✔
2494
                chanRestorer := &chanDBRestorer{
3✔
2495
                        db:         s.chanStateDB,
3✔
2496
                        secretKeys: s.cc.KeyRing,
3✔
2497
                        chainArb:   s.chainArb,
3✔
2498
                }
3✔
2499
                if len(s.chansToRestore.PackedSingleChanBackups) != 0 {
3✔
2500
                        _, err := chanbackup.UnpackAndRecoverSingles(
×
2501
                                s.chansToRestore.PackedSingleChanBackups,
×
2502
                                s.cc.KeyRing, chanRestorer, s,
×
2503
                        )
×
2504
                        if err != nil {
×
2505
                                startErr = fmt.Errorf("unable to unpack single "+
×
2506
                                        "backups: %v", err)
×
2507
                                return
×
2508
                        }
×
2509
                }
2510
                if len(s.chansToRestore.PackedMultiChanBackup) != 0 {
6✔
2511
                        _, err := chanbackup.UnpackAndRecoverMulti(
3✔
2512
                                s.chansToRestore.PackedMultiChanBackup,
3✔
2513
                                s.cc.KeyRing, chanRestorer, s,
3✔
2514
                        )
3✔
2515
                        if err != nil {
3✔
2516
                                startErr = fmt.Errorf("unable to unpack chan "+
×
2517
                                        "backup: %v", err)
×
2518
                                return
×
2519
                        }
×
2520
                }
2521

2522
                // chanSubSwapper must be started after the `channelNotifier`
2523
                // because it depends on channel events as a synchronization
2524
                // point.
2525
                cleanup = cleanup.add(s.chanSubSwapper.Stop)
3✔
2526
                if err := s.chanSubSwapper.Start(); err != nil {
3✔
2527
                        startErr = err
×
2528
                        return
×
2529
                }
×
2530

2531
                if s.torController != nil {
3✔
2532
                        cleanup = cleanup.add(s.torController.Stop)
×
2533
                        if err := s.createNewHiddenService(ctx); err != nil {
×
2534
                                startErr = err
×
2535
                                return
×
2536
                        }
×
2537
                }
2538

2539
                if s.natTraversal != nil {
3✔
2540
                        s.wg.Add(1)
×
2541
                        go s.watchExternalIP()
×
2542
                }
×
2543

2544
                // Start connmgr last to prevent connections before init.
2545
                cleanup = cleanup.add(func() error {
3✔
2546
                        s.connMgr.Stop()
×
2547
                        return nil
×
2548
                })
×
2549

2550
                // RESOLVE: s.connMgr.Start() is called here, but
2551
                // brontide.NewListener() is called in newServer. This means
2552
                // that we are actually listening and partially accepting
2553
                // inbound connections even before the connMgr starts.
2554
                //
2555
                // TODO(yy): move the log into the connMgr's `Start` method.
2556
                srvrLog.Info("connMgr starting...")
3✔
2557
                s.connMgr.Start()
3✔
2558
                srvrLog.Debug("connMgr started")
3✔
2559

3✔
2560
                // If peers are specified as a config option, we'll add those
3✔
2561
                // peers first.
3✔
2562
                for _, peerAddrCfg := range s.cfg.AddPeers {
6✔
2563
                        parsedPubkey, parsedHost, err := lncfg.ParseLNAddressPubkey(
3✔
2564
                                peerAddrCfg,
3✔
2565
                        )
3✔
2566
                        if err != nil {
3✔
2567
                                startErr = fmt.Errorf("unable to parse peer "+
×
2568
                                        "pubkey from config: %v", err)
×
2569
                                return
×
2570
                        }
×
2571
                        addr, err := parseAddr(parsedHost, s.cfg.net)
3✔
2572
                        if err != nil {
3✔
2573
                                startErr = fmt.Errorf("unable to parse peer "+
×
2574
                                        "address provided as a config option: "+
×
2575
                                        "%v", err)
×
2576
                                return
×
2577
                        }
×
2578

2579
                        peerAddr := &lnwire.NetAddress{
3✔
2580
                                IdentityKey: parsedPubkey,
3✔
2581
                                Address:     addr,
3✔
2582
                                ChainNet:    s.cfg.ActiveNetParams.Net,
3✔
2583
                        }
3✔
2584

3✔
2585
                        err = s.ConnectToPeer(
3✔
2586
                                peerAddr, true,
3✔
2587
                                s.cfg.ConnectionTimeout,
3✔
2588
                        )
3✔
2589
                        if err != nil {
3✔
2590
                                startErr = fmt.Errorf("unable to connect to "+
×
2591
                                        "peer address provided as a config "+
×
2592
                                        "option: %v", err)
×
2593
                                return
×
2594
                        }
×
2595
                }
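
                // The entries handled above use the standard Lightning
                // address form <hex pubkey>@<host>[:<port>]. For example,
                // with a hypothetical key and host, a peer could be
                // supplied via the config as:
                //
                //      addpeer=02abc...def@203.0.113.7:9735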
2596

2597
                // Subscribe to NodeAnnouncements that advertise new addresses
2598
                // for our persistent peers.
2599
                if err := s.updatePersistentPeerAddrs(); err != nil {
3✔
2600
                        srvrLog.Errorf("Failed to update persistent peer "+
×
2601
                                "addr: %v", err)
×
2602

×
2603
                        startErr = err
×
2604
                        return
×
2605
                }
×
2606

2607
                // With all the relevant sub-systems started, we'll now attempt
2608
                // to establish persistent connections to our direct channel
2609
                // collaborators within the network. Before doing so however,
2610
                // we'll prune our set of link nodes found within the database
2611
                // to ensure we don't reconnect to any nodes we no longer have
2612
                // open channels with.
2613
                if err := s.chanStateDB.PruneLinkNodes(); err != nil {
3✔
2614
                        srvrLog.Errorf("Failed to prune link nodes: %v", err)
×
2615

×
2616
                        startErr = err
×
2617
                        return
×
2618
                }
×
2619

2620
                if err := s.establishPersistentConnections(ctx); err != nil {
3✔
2621
                        srvrLog.Errorf("Failed to establish persistent "+
×
2622
                                "connections: %v", err)
×
2623
                }
×
2624

2625
                // setSeedList is a helper function that turns multiple DNS seed
2626
                // server tuples from the command line or config file into the
2627
                // data structure we need and does a basic format sanity check
2628
                // in the process.
2629
                setSeedList := func(tuples []string, genesisHash chainhash.Hash) {
3✔
2630
                        if len(tuples) == 0 {
×
2631
                                return
×
2632
                        }
×
2633

2634
                        result := make([][2]string, len(tuples))
×
2635
                        for idx, tuple := range tuples {
×
2636
                                tuple = strings.TrimSpace(tuple)
×
2637
                                if len(tuple) == 0 {
×
2638
                                        return
×
2639
                                }
×
2640

2641
                                servers := strings.Split(tuple, ",")
×
2642
                                if len(servers) > 2 || len(servers) == 0 {
×
2643
                                        srvrLog.Warnf("Ignoring invalid DNS "+
×
2644
                                                "seed tuple: %v", servers)
×
2645
                                        return
×
2646
                                }
×
2647

2648
                                copy(result[idx][:], servers)
×
2649
                        }
2650

2651
                        chainreg.ChainDNSSeeds[genesisHash] = result
×
2652
                }
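
                // Each tuple is either a single seed host, or a
                // "<seed>,<fallback>" pair whose second entry, if
                // present, is typically used when direct SRV lookups
                // aren't possible (e.g. over Tor). Hypothetical
                // examples:
                //
                //      bitcoin.dnsseed=seed.example.org
                //      bitcoin.dnsseed=seed.example.org,soa.seed.example.org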
2653

2654
                // Let users overwrite the DNS seed nodes. We only allow them
2655
                // for bitcoin mainnet, testnet3, testnet4 and signet.
2656
                if s.cfg.Bitcoin.MainNet {
3✔
2657
                        setSeedList(
×
2658
                                s.cfg.Bitcoin.DNSSeeds,
×
2659
                                chainreg.BitcoinMainnetGenesis,
×
2660
                        )
×
2661
                }
×
2662
                if s.cfg.Bitcoin.TestNet3 {
3✔
2663
                        setSeedList(
×
2664
                                s.cfg.Bitcoin.DNSSeeds,
×
2665
                                chainreg.BitcoinTestnetGenesis,
×
2666
                        )
×
2667
                }
×
2668
                if s.cfg.Bitcoin.TestNet4 {
3✔
2669
                        setSeedList(
×
2670
                                s.cfg.Bitcoin.DNSSeeds,
×
2671
                                chainreg.BitcoinTestnet4Genesis,
×
2672
                        )
×
2673
                }
×
2674
                if s.cfg.Bitcoin.SigNet {
3✔
2675
                        setSeedList(
×
2676
                                s.cfg.Bitcoin.DNSSeeds,
×
2677
                                chainreg.BitcoinSignetGenesis,
×
2678
                        )
×
2679
                }
×
2680

2681
                // If network bootstrapping hasn't been disabled, then we'll
2682
                // configure the set of active bootstrappers, and launch a
2683
                // dedicated goroutine to maintain a set of persistent
2684
                // connections.
2685
                if !s.cfg.NoNetBootstrap {
6✔
2686
                        bootstrappers, err := initNetworkBootstrappers(s)
3✔
2687
                        if err != nil {
3✔
2688
                                startErr = err
×
2689
                                return
×
2690
                        }
×
2691

2692
                        s.wg.Add(1)
3✔
2693
                        go s.peerBootstrapper(
3✔
2694
                                ctx, defaultMinPeers, bootstrappers,
3✔
2695
                        )
3✔
2696
                } else {
3✔
2697
                        srvrLog.Infof("Auto peer bootstrapping is disabled")
3✔
2698
                }
3✔
2699

2700
                // Start the blockbeat after all other subsystems have been
2701
                // started so they are ready to receive new blocks.
2702
                cleanup = cleanup.add(func() error {
3✔
2703
                        s.blockbeatDispatcher.Stop()
×
2704
                        return nil
×
2705
                })
×
2706
                if err := s.blockbeatDispatcher.Start(); err != nil {
3✔
2707
                        startErr = err
×
2708
                        return
×
2709
                }
×
2710

2711
                // Set the active flag now that we've completed the full
2712
                // startup.
2713
                atomic.StoreInt32(&s.active, 1)
3✔
2714
        })
2715

2716
        if startErr != nil {
3✔
2717
                cleanup.run()
×
2718
        }
×
2719
        return startErr
3✔
2720
}
2721

2722
// Stop gracefully shuts down the main daemon server. This function will signal
2723
// any active goroutines and helper objects to exit, then blocks until they've
2724
// all successfully exited. Additionally, any/all listeners are closed.
2725
// NOTE: This function is safe for concurrent access.
2726
func (s *server) Stop() error {
3✔
2727
        s.stop.Do(func() {
6✔
2728
                atomic.StoreInt32(&s.stopping, 1)
3✔
2729

3✔
2730
                ctx := context.Background()
3✔
2731

3✔
2732
                close(s.quit)
3✔
2733

3✔
2734
                // Shut down connMgr first to prevent conns during shutdown.
3✔
2735
                s.connMgr.Stop()
3✔
2736

3✔
2737
                // Stop dispatching blocks to other systems immediately.
3✔
2738
                s.blockbeatDispatcher.Stop()
3✔
2739

3✔
2740
                // Shut down the wallet, funding manager, and the RPC server.
3✔
2741
                if err := s.chanStatusMgr.Stop(); err != nil {
3✔
2742
                        srvrLog.Warnf("failed to stop chanStatusMgr: %v", err)
×
2743
                }
×
2744
                if err := s.htlcSwitch.Stop(); err != nil {
3✔
2745
                        srvrLog.Warnf("failed to stop htlcSwitch: %v", err)
×
2746
                }
×
2747
                if err := s.sphinx.Stop(); err != nil {
3✔
2748
                        srvrLog.Warnf("failed to stop sphinx: %v", err)
×
2749
                }
×
2750
                if err := s.invoices.Stop(); err != nil {
3✔
2751
                        srvrLog.Warnf("failed to stop invoices: %v", err)
×
2752
                }
×
2753
                if err := s.interceptableSwitch.Stop(); err != nil {
3✔
2754
                        srvrLog.Warnf("failed to stop interceptable "+
×
2755
                                "switch: %v", err)
×
2756
                }
×
2757
                if err := s.invoiceHtlcModifier.Stop(); err != nil {
3✔
2758
                        srvrLog.Warnf("failed to stop htlc invoices "+
×
2759
                                "modifier: %v", err)
×
2760
                }
×
2761
                if err := s.chanRouter.Stop(); err != nil {
3✔
2762
                        srvrLog.Warnf("failed to stop chanRouter: %v", err)
×
2763
                }
×
2764
                if err := s.graphBuilder.Stop(); err != nil {
3✔
2765
                        srvrLog.Warnf("failed to stop graphBuilder %v", err)
×
2766
                }
×
2767
                if err := s.graphDB.Stop(); err != nil {
3✔
2768
                        srvrLog.Warnf("failed to stop graphDB %v", err)
×
2769
                }
×
2770
                if err := s.chainArb.Stop(); err != nil {
3✔
2771
                        srvrLog.Warnf("failed to stop chainArb: %v", err)
×
2772
                }
×
2773
                if err := s.fundingMgr.Stop(); err != nil {
3✔
2774
                        srvrLog.Warnf("failed to stop fundingMgr: %v", err)
×
2775
                }
×
2776
                if err := s.breachArbitrator.Stop(); err != nil {
3✔
2777
                        srvrLog.Warnf("failed to stop breachArbitrator: %v",
×
2778
                                err)
×
2779
                }
×
2780
                if err := s.utxoNursery.Stop(); err != nil {
3✔
2781
                        srvrLog.Warnf("failed to stop utxoNursery: %v", err)
×
2782
                }
×
2783
                if err := s.authGossiper.Stop(); err != nil {
3✔
2784
                        srvrLog.Warnf("failed to stop authGossiper: %v", err)
×
2785
                }
×
2786
                if err := s.sweeper.Stop(); err != nil {
3✔
2787
                        srvrLog.Warnf("failed to stop sweeper: %v", err)
×
2788
                }
×
2789
                if err := s.txPublisher.Stop(); err != nil {
3✔
2790
                        srvrLog.Warnf("failed to stop txPublisher: %v", err)
×
2791
                }
×
2792
                if err := s.channelNotifier.Stop(); err != nil {
3✔
2793
                        srvrLog.Warnf("failed to stop channelNotifier: %v", err)
×
2794
                }
×
2795
                if err := s.peerNotifier.Stop(); err != nil {
3✔
2796
                        srvrLog.Warnf("failed to stop peerNotifier: %v", err)
×
2797
                }
×
2798
                if err := s.htlcNotifier.Stop(); err != nil {
3✔
2799
                        srvrLog.Warnf("failed to stop htlcNotifier: %v", err)
×
2800
                }
×
2801

2802
                // Update channel.backup file. Make sure to do it before
2803
                // stopping chanSubSwapper.
2804
                singles, err := chanbackup.FetchStaticChanBackups(
3✔
2805
                        ctx, s.chanStateDB, s.addrSource,
3✔
2806
                )
3✔
2807
                if err != nil {
3✔
2808
                        srvrLog.Warnf("failed to fetch channel states: %v",
×
2809
                                err)
×
2810
                } else {
3✔
2811
                        err := s.chanSubSwapper.ManualUpdate(singles)
3✔
2812
                        if err != nil {
6✔
2813
                                srvrLog.Warnf("Manual update of channel "+
3✔
2814
                                        "backup failed: %v", err)
3✔
2815
                        }
3✔
2816
                }
2817

2818
                if err := s.chanSubSwapper.Stop(); err != nil {
3✔
2819
                        srvrLog.Warnf("failed to stop chanSubSwapper: %v", err)
×
2820
                }
×
2821
                if err := s.cc.ChainNotifier.Stop(); err != nil {
3✔
2822
                        srvrLog.Warnf("Unable to stop ChainNotifier: %v", err)
×
2823
                }
×
2824
                if err := s.cc.BestBlockTracker.Stop(); err != nil {
3✔
2825
                        srvrLog.Warnf("Unable to stop BestBlockTracker: %v",
×
2826
                                err)
×
2827
                }
×
2828
                if err := s.chanEventStore.Stop(); err != nil {
3✔
2829
                        srvrLog.Warnf("Unable to stop ChannelEventStore: %v",
×
2830
                                err)
×
2831
                }
×
2832
                s.missionController.StopStoreTickers()
3✔
2833

3✔
2834
                // Disconnect from each active peer to ensure that
3✔
2835
                // peerTerminationWatchers signal completion to each peer.
3✔
2836
                for _, peer := range s.Peers() {
6✔
2837
                        err := s.DisconnectPeer(peer.IdentityKey())
3✔
2838
                        if err != nil {
3✔
2839
                                srvrLog.Warnf("could not disconnect peer: %v"+
×
2840
                                        "received error: %v", peer.IdentityKey(),
×
2841
                                        err,
×
2842
                                )
×
2843
                        }
×
2844
                }
2845

2846
                // Now that all connections have been torn down, stop the tower
2847
                // client which will reliably flush all queued states to the
2848
                // tower. If this is halted for any reason, the force quit timer
2849
                // will kick in and abort to allow this method to return.
2850
                if s.towerClientMgr != nil {
6✔
2851
                        if err := s.towerClientMgr.Stop(); err != nil {
3✔
2852
                                srvrLog.Warnf("Unable to shut down tower "+
×
2853
                                        "client manager: %v", err)
×
2854
                        }
×
2855
                }
2856

2857
                if s.hostAnn != nil {
3✔
2858
                        if err := s.hostAnn.Stop(); err != nil {
×
2859
                                srvrLog.Warnf("unable to shut down host "+
×
2860
                                        "annoucner: %v", err)
×
2861
                        }
×
2862
                }
2863

2864
                if s.livenessMonitor != nil {
6✔
2865
                        if err := s.livenessMonitor.Stop(); err != nil {
3✔
2866
                                srvrLog.Warnf("unable to shutdown liveness "+
×
2867
                                        "monitor: %v", err)
×
2868
                        }
×
2869
                }
2870

2871
                // Wait for all lingering goroutines to quit.
2872
                srvrLog.Debug("Waiting for server to shutdown...")
3✔
2873
                s.wg.Wait()
3✔
2874

3✔
2875
                srvrLog.Debug("Stopping buffer pools...")
3✔
2876
                s.sigPool.Stop()
3✔
2877
                s.writePool.Stop()
3✔
2878
                s.readPool.Stop()
3✔
2879
        })
2880

2881
        return nil
3✔
2882
}
2883

2884
// Stopped returns true if the server has been instructed to shutdown.
2885
// NOTE: This function is safe for concurrent access.
2886
func (s *server) Stopped() bool {
3✔
2887
        return atomic.LoadInt32(&s.stopping) != 0
3✔
2888
}
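
// The Start/Stop/Started/Stopped methods above follow a once-guarded
// lifecycle idiom: sync.Once makes Start and Stop idempotent, while atomic
// flags expose the current state to other goroutines. A condensed,
// illustrative sketch of the same shape:
//
//	type lifecycle struct {
//	        start, stop      sync.Once
//	        active, stopping int32
//	        quit             chan struct{}
//	}
//
//	func (l *lifecycle) Start() {
//	        l.start.Do(func() {
//	                // ... start subsystems ...
//	                atomic.StoreInt32(&l.active, 1)
//	        })
//	}
//
//	func (l *lifecycle) Stop() {
//	        l.stop.Do(func() {
//	                atomic.StoreInt32(&l.stopping, 1)
//	                close(l.quit)
//	                // ... stop subsystems, then wait ...
//	        })
//	}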
3✔
2889

2890
// configurePortForwarding attempts to set up port forwarding for the different
2891
// ports that the server will be listening on.
2892
//
2893
// NOTE: This should only be used when using some kind of NAT traversal to
2894
// automatically set up forwarding rules.
2895
func (s *server) configurePortForwarding(ports ...uint16) ([]string, error) {
×
2896
        ip, err := s.natTraversal.ExternalIP()
×
2897
        if err != nil {
×
2898
                return nil, err
×
2899
        }
×
2900
        s.lastDetectedIP = ip
×
2901

×
2902
        externalIPs := make([]string, 0, len(ports))
×
2903
        for _, port := range ports {
×
2904
                if err := s.natTraversal.AddPortMapping(port); err != nil {
×
2905
                        srvrLog.Debugf("Unable to forward port %d: %v", port, err)
×
2906
                        continue
×
2907
                }
2908

2909
                hostIP := fmt.Sprintf("%v:%d", ip, port)
×
2910
                externalIPs = append(externalIPs, hostIP)
×
2911
        }
2912

2913
        return externalIPs, nil
×
2914
}
2915

2916
// removePortForwarding attempts to clear the forwarding rules for the different
2917
// ports the server is currently listening on.
2918
//
2919
// NOTE: This should only be used when using some kind of NAT traversal to
2920
// automatically set up forwarding rules.
2921
func (s *server) removePortForwarding() {
×
2922
        forwardedPorts := s.natTraversal.ForwardedPorts()
×
2923
        for _, port := range forwardedPorts {
×
2924
                if err := s.natTraversal.DeletePortMapping(port); err != nil {
×
2925
                        srvrLog.Errorf("Unable to remove forwarding rules for "+
×
2926
                                "port %d: %v", port, err)
×
2927
                }
×
2928
        }
2929
}
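
// The two helpers above only rely on a small NAT traversal surface,
// inferred here from usage (the concrete interface lives in the nat
// package and may differ in detail):
//
//	ExternalIP() (net.IP, error)
//	AddPortMapping(port uint16) error
//	DeletePortMapping(port uint16) error
//	ForwardedPorts() []uint16
//	Name() string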
2930

2931
// watchExternalIP continuously checks for an updated external IP address every
2932
// 15 minutes. Once a new IP address has been detected, it will automatically
2933
// handle port forwarding rules and send updated node announcements to the
2934
// currently connected peers.
2935
//
2936
// NOTE: This MUST be run as a goroutine.
2937
func (s *server) watchExternalIP() {
×
2938
        defer s.wg.Done()
×
2939

×
2940
        // Before exiting, we'll make sure to remove the forwarding rules set
×
2941
        // up by the server.
×
2942
        defer s.removePortForwarding()
×
2943

×
2944
        // Keep track of the external IPs set by the user to avoid replacing
×
2945
        // them when detecting a new IP.
×
2946
        ipsSetByUser := make(map[string]struct{})
×
2947
        for _, ip := range s.cfg.ExternalIPs {
×
2948
                ipsSetByUser[ip.String()] = struct{}{}
×
2949
        }
×
2950

2951
        forwardedPorts := s.natTraversal.ForwardedPorts()
×
2952

×
2953
        ticker := time.NewTicker(15 * time.Minute)
×
2954
        defer ticker.Stop()
×
2955
out:
×
2956
        for {
×
2957
                select {
×
2958
                case <-ticker.C:
×
2959
                        // We'll start off by making sure a new IP address has
×
2960
                        // been detected.
×
2961
                        ip, err := s.natTraversal.ExternalIP()
×
2962
                        if err != nil {
×
2963
                                srvrLog.Debugf("Unable to retrieve the "+
×
2964
                                        "external IP address: %v", err)
×
2965
                                continue
×
2966
                        }
2967

2968
                        // Periodically renew the NAT port forwarding.
2969
                        for _, port := range forwardedPorts {
×
2970
                                err := s.natTraversal.AddPortMapping(port)
×
2971
                                if err != nil {
×
2972
                                        srvrLog.Warnf("Unable to automatically "+
×
2973
                                                "re-create port forwarding using %s: %v",
×
2974
                                                s.natTraversal.Name(), err)
×
2975
                                } else {
×
2976
                                        srvrLog.Debugf("Automatically re-created "+
×
2977
                                                "forwarding for port %d using %s to "+
×
2978
                                                "advertise external IP",
×
2979
                                                port, s.natTraversal.Name())
×
2980
                                }
×
2981
                        }
2982

2983
                        if ip.Equal(s.lastDetectedIP) {
×
2984
                                continue
×
2985
                        }
2986

2987
                        srvrLog.Infof("Detected new external IP address %s", ip)
×
2988

×
2989
                        // Next, we'll craft the new addresses that will be
×
2990
                        // included in the new node announcement and advertised
×
2991
                        // to the network. Each address will consist of the new
×
2992
                        // IP detected and one of the currently advertised
×
2993
                        // ports.
×
2994
                        var newAddrs []net.Addr
×
2995
                        for _, port := range forwardedPorts {
×
2996
                                hostIP := fmt.Sprintf("%v:%d", ip, port)
×
2997
                                addr, err := net.ResolveTCPAddr("tcp", hostIP)
×
2998
                                if err != nil {
×
2999
                                        srvrLog.Debugf("Unable to resolve "+
×
3000
                                                "host %v: %v", addr, err)
×
3001
                                        continue
×
3002
                                }
3003

3004
                                newAddrs = append(newAddrs, addr)
×
3005
                        }
3006

3007
                        // Skip the update if we weren't able to resolve any of
3008
                        // the new addresses.
3009
                        if len(newAddrs) == 0 {
×
3010
                                srvrLog.Debug("Skipping node announcement " +
×
3011
                                        "update due to not being able to " +
×
3012
                                        "resolve any new addresses")
×
3013
                                continue
×
3014
                        }
3015

3016
                        // Now, we'll need to update the addresses in our node's
3017
                        // announcement in order to propagate the update
3018
                        // throughout the network. We'll only include addresses
3019
                        // that have a different IP from the previous one, as
3020
                        // the previous IP is no longer valid.
3021
                        currentNodeAnn := s.getNodeAnnouncement()
×
3022

×
3023
                        for _, addr := range currentNodeAnn.Addresses {
×
3024
                                host, _, err := net.SplitHostPort(addr.String())
×
3025
                                if err != nil {
×
3026
                                        srvrLog.Debugf("Unable to determine "+
×
3027
                                                "host from address %v: %v",
×
3028
                                                addr, err)
×
3029
                                        continue
×
3030
                                }
3031

3032
                                // We'll also make sure to include external IPs
3033
                                // set manually by the user.
3034
                                _, setByUser := ipsSetByUser[addr.String()]
×
3035
                                if setByUser || host != s.lastDetectedIP.String() {
×
3036
                                        newAddrs = append(newAddrs, addr)
×
3037
                                }
×
3038
                        }
3039

3040
                        // Then, we'll generate a new timestamped node
3041
                        // announcement with the updated addresses and broadcast
3042
                        // it to our peers.
3043
                        newNodeAnn, err := s.genNodeAnnouncement(
×
3044
                                nil, netann.NodeAnnSetAddrs(newAddrs),
×
3045
                        )
×
3046
                        if err != nil {
×
3047
                                srvrLog.Debugf("Unable to generate new node "+
×
3048
                                        "announcement: %v", err)
×
3049
                                continue
×
3050
                        }
3051

3052
                        err = s.BroadcastMessage(nil, &newNodeAnn)
×
3053
                        if err != nil {
×
3054
                                srvrLog.Debugf("Unable to broadcast new node "+
×
3055
                                        "announcement to peers: %v", err)
×
3056
                                continue
×
3057
                        }
3058

3059
                        // Finally, update the last IP seen to the current one.
3060
                        s.lastDetectedIP = ip
×
3061
                case <-s.quit:
×
3062
                        break out
×
3063
                }
3064
        }
3065
}
3066

3067
// initNetworkBootstrappers initializes a set of network peer bootstrappers
3068
// based on the server, and currently active bootstrap mechanisms as defined
3069
// within the current configuration.
3070
func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, error) {
3✔
3071
        srvrLog.Infof("Initializing peer network bootstrappers!")
3✔
3072

3✔
3073
        var bootStrappers []discovery.NetworkPeerBootstrapper
3✔
3074

3✔
3075
        // First, we'll create an instance of the ChannelGraphBootstrapper as
3✔
3076
        // this can be used by default if we've already partially seeded the
3✔
3077
        // network.
3✔
3078
        chanGraph := autopilot.ChannelGraphFromDatabase(s.graphDB)
3✔
3079
        graphBootstrapper, err := discovery.NewGraphBootstrapper(
3✔
3080
                chanGraph, s.cfg.Bitcoin.IsLocalNetwork(),
3✔
3081
        )
3✔
3082
        if err != nil {
3✔
3083
                return nil, err
×
3084
        }
×
3085
        bootStrappers = append(bootStrappers, graphBootstrapper)
3✔
3086

3✔
3087
        // If this isn't using simnet or regtest mode, then one of our
3✔
3088
        // additional bootstrapping sources will be the set of running DNS
3✔
3089
        // seeds.
3✔
3090
        if !s.cfg.Bitcoin.IsLocalNetwork() {
3✔
3091
                //nolint:ll
×
3092
                dnsSeeds, ok := chainreg.ChainDNSSeeds[*s.cfg.ActiveNetParams.GenesisHash]
×
3093

×
3094
                // If we have a set of DNS seeds for this chain, then we'll add
×
3095
                // it as an additional bootstrapping source.
×
3096
                if ok {
×
3097
                        srvrLog.Infof("Creating DNS peer bootstrapper with "+
×
3098
                                "seeds: %v", dnsSeeds)
×
3099

×
3100
                        dnsBootStrapper := discovery.NewDNSSeedBootstrapper(
×
3101
                                dnsSeeds, s.cfg.net, s.cfg.ConnectionTimeout,
×
3102
                        )
×
3103
                        bootStrappers = append(bootStrappers, dnsBootStrapper)
×
3104
                }
×
3105
        }
3106

3107
        return bootStrappers, nil
3✔
3108
}
3109

3110
// createBootstrapIgnorePeers creates a map of peers that the bootstrap process
3111
// needs to ignore, which is made of three parts,
3112
//   - the node itself needs to be skipped as it doesn't make sense to connect
3113
//     to itself.
3114
//   - the peers that we already have connections with, as in s.peersByPub.
3115
//   - the peers that we are attempting to connect to, as in s.persistentPeers.
3116
func (s *server) createBootstrapIgnorePeers() map[autopilot.NodeID]struct{} {
3✔
3117
        s.mu.RLock()
3✔
3118
        defer s.mu.RUnlock()
3✔
3119

3✔
3120
        ignore := make(map[autopilot.NodeID]struct{})
3✔
3121

3✔
3122
        // We should ignore ourselves from bootstrapping.
3✔
3123
        selfKey := autopilot.NewNodeID(s.identityECDH.PubKey())
3✔
3124
        ignore[selfKey] = struct{}{}
3✔
3125

3✔
3126
        // Ignore all connected peers.
3✔
3127
        for _, peer := range s.peersByPub {
3✔
3128
                nID := autopilot.NewNodeID(peer.IdentityKey())
×
3129
                ignore[nID] = struct{}{}
×
3130
        }
×
3131

3132
        // Ignore all persistent peers as they have a dedicated reconnecting
3133
        // process.
3134
        for pubKeyStr := range s.persistentPeers {
3✔
3135
                var nID autopilot.NodeID
×
3136
                copy(nID[:], []byte(pubKeyStr))
×
3137
                ignore[nID] = struct{}{}
×
3138
        }
×
3139

3140
        return ignore
3✔
3141
}
3142

3143
// peerBootstrapper is a goroutine which is tasked with attempting to establish
3144
// and maintain a target minimum number of outbound connections. With this
3145
// invariant, we ensure that our node is connected to a diverse set of peers
3146
// and that nodes newly joining the network receive an up to date network view
3147
// as soon as possible.
3148
func (s *server) peerBootstrapper(ctx context.Context, numTargetPeers uint32,
3149
        bootstrappers []discovery.NetworkPeerBootstrapper) {
3✔
3150

3✔
3151
        defer s.wg.Done()
3✔
3152

3✔
3153
        // Before we continue, init the ignore peers map.
3✔
3154
        ignoreList := s.createBootstrapIgnorePeers()
3✔
3155

3✔
3156
        // We'll start off by aggressively attempting connections to peers in
3✔
3157
        // order to be a part of the network as soon as possible.
3✔
3158
        s.initialPeerBootstrap(ctx, ignoreList, numTargetPeers, bootstrappers)
3✔
3159

3✔
3160
        // Once done, we'll attempt to maintain our target minimum number of
3✔
3161
        // peers.
3✔
3162
        //
3✔
3163
        // We'll use a 15 second backoff, and double the time every time an
3✔
3164
        // epoch fails up to a ceiling.
3✔
3165
        backOff := time.Second * 15
3✔
3166

3✔
3167
        // We'll create a new ticker to wake us up every 15 seconds so we can
3✔
3168
        // see if we've reached our minimum number of peers.
3✔
3169
        sampleTicker := time.NewTicker(backOff)
3✔
3170
        defer sampleTicker.Stop()
3✔
3171

3✔
3172
        // We'll use the number of attempts and errors to determine if we need
3✔
3173
        // to increase the time between discovery epochs.
3✔
3174
        var epochErrors uint32 // To be used atomically.
3✔
3175
        var epochAttempts uint32
3✔
3176

3✔
3177
        for {
6✔
3178
                select {
3✔
3179
                // The ticker has just woken us up, so we'll need to check if
3180
                // we need to attempt to connect out to any more peers.
3181
                case <-sampleTicker.C:
×
3182
                        // Obtain the current number of peers, so we can gauge
×
3183
                        // if we need to sample more peers or not.
×
3184
                        s.mu.RLock()
×
3185
                        numActivePeers := uint32(len(s.peersByPub))
×
3186
                        s.mu.RUnlock()
×
3187

×
3188
                        // If we have enough peers, then we can loop back
×
3189
                        // around to the next round as we're done here.
×
3190
                        if numActivePeers >= numTargetPeers {
×
3191
                                continue
×
3192
                        }
3193

3194
                        // If all of our attempts failed during this last back
3195
                        // off period, then we'll increase our backoff up to
3196
                        // the 5 minute ceiling to avoid an excessive number
3197
                        // of queries.
3198
                        //
3199
                        // TODO(roasbeef): add reverse policy too?
3200

3201
                        if epochAttempts > 0 &&
×
3202
                                atomic.LoadUint32(&epochErrors) >= epochAttempts {
×
3203

×
3204
                                sampleTicker.Stop()
×
3205

×
3206
                                backOff *= 2
×
3207
                                if backOff > bootstrapBackOffCeiling {
×
3208
                                        backOff = bootstrapBackOffCeiling
×
3209
                                }
×
3210

3211
                                srvrLog.Debugf("Backing off peer bootstrapper to "+
×
3212
                                        "%v", backOff)
×
3213
                                sampleTicker = time.NewTicker(backOff)
×
3214
                                continue
×
3215
                        }
3216

3217
                        atomic.StoreUint32(&epochErrors, 0)
×
3218
                        epochAttempts = 0
×
3219

×
3220
                        // Since we now need more peers, we'll compute the
×
3221
                        // exact number we need to reach our threshold.
×
3222
                        numNeeded := numTargetPeers - numActivePeers
×
3223

×
3224
                        srvrLog.Debugf("Attempting to obtain %v more network "+
×
3225
                                "peers", numNeeded)
×
3226

×
3227
                        // With the number of peers we need calculated, we'll
×
3228
                        // query the network bootstrappers to sample a set of
×
3229
                        // random addrs for us.
×
3230
                        //
×
3231
                        // Before we continue, get a copy of the ignore peers
×
3232
                        // map.
×
3233
                        ignoreList = s.createBootstrapIgnorePeers()
×
3234

×
3235
                        peerAddrs, err := discovery.MultiSourceBootstrap(
×
3236
                                ctx, ignoreList, numNeeded*2, bootstrappers...,
×
3237
                        )
×
3238
                        if err != nil {
×
3239
                                srvrLog.Errorf("Unable to retrieve bootstrap "+
×
3240
                                        "peers: %v", err)
×
3241
                                continue
×
3242
                        }
3243

3244
                        // Finally, we'll launch a new goroutine for each
3245
                        // prospective peer candidate.
3246
                        for _, addr := range peerAddrs {
×
3247
                                epochAttempts++
×
3248

×
3249
                                go func(a *lnwire.NetAddress) {
×
3250
                                        // TODO(roasbeef): can do AS, subnet,
×
3251
                                        // country diversity, etc
×
3252
                                        errChan := make(chan error, 1)
×
3253
                                        s.connectToPeer(
×
3254
                                                a, errChan,
×
3255
                                                s.cfg.ConnectionTimeout,
×
3256
                                        )
×
3257
                                        select {
×
3258
                                        case err := <-errChan:
×
3259
                                                if err == nil {
×
3260
                                                        return
×
3261
                                                }
×
3262

3263
                                                srvrLog.Errorf("Unable to "+
×
3264
                                                        "connect to %v: %v",
×
3265
                                                        a, err)
×
3266
                                                atomic.AddUint32(&epochErrors, 1)
×
3267
                                        case <-s.quit:
×
3268
                                        }
3269
                                }(addr)
3270
                        }
3271
                case <-s.quit:
3✔
3272
                        return
3✔
3273
                }
3274
        }
3275
}
3276

3277
// bootstrapBackOffCeiling is the maximum amount of time we'll wait between
3278
// failed attempts to locate a set of bootstrap peers. We'll slowly double our
3279
// query back off each time we encounter a failure.
3280
const bootstrapBackOffCeiling = time.Minute * 5
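
// nextBackoff is an illustrative helper, not referenced elsewhere in this
// file, capturing the doubling-with-ceiling policy implemented inline by
// peerBootstrapper and initialPeerBootstrap.
func nextBackoff(cur, ceiling time.Duration) time.Duration {
        next := cur * 2
        if next > ceiling {
                next = ceiling
        }
        return next
}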
3281

3282
// initialPeerBootstrap attempts to continuously connect to peers on startup
3283
// until the target number of peers has been reached. This ensures that nodes
3284
// receive an up-to-date network view as soon as possible.
3285
func (s *server) initialPeerBootstrap(ctx context.Context,
3286
        ignore map[autopilot.NodeID]struct{}, numTargetPeers uint32,
3287
        bootstrappers []discovery.NetworkPeerBootstrapper) {
3✔
3288

3✔
3289
        srvrLog.Debugf("Init bootstrap with targetPeers=%v, bootstrappers=%v, "+
3✔
3290
                "ignore=%v", numTargetPeers, len(bootstrappers), len(ignore))
3✔
3291

3✔
3292
        // We'll start off by waiting 2 seconds between failed attempts, then
3✔
3293
        // double each time we fail until we hit the bootstrapBackOffCeiling.
3✔
3294
        var delaySignal <-chan time.Time
3✔
3295
        delayTime := time.Second * 2
3✔
3296

3✔
3297
        // As we want to be more aggressive, we'll use a lower back off
3✔
3298
        // ceiling than the main peer bootstrap logic.
3✔
3299
        backOffCeiling := bootstrapBackOffCeiling / 5
3✔
3300

3✔
3301
        for attempts := 0; ; attempts++ {
6✔
3302
                // Check if the server has been requested to shut down in order
3✔
3303
                // to prevent blocking.
3✔
3304
                if s.Stopped() {
3✔
3305
                        return
×
3306
                }
×
3307

3308
                // We can exit our aggressive initial peer bootstrapping stage
3309
                // if we've reached our target number of peers.
3310
                s.mu.RLock()
3✔
3311
                numActivePeers := uint32(len(s.peersByPub))
3✔
3312
                s.mu.RUnlock()
3✔
3313

3✔
3314
                if numActivePeers >= numTargetPeers {
6✔
3315
                        return
3✔
3316
                }
3✔
3317

3318
                if attempts > 0 {
3✔
UNCOV
3319
                        srvrLog.Debugf("Waiting %v before trying to locate "+
×
UNCOV
3320
                                "bootstrap peers (attempt #%v)", delayTime,
×
UNCOV
3321
                                attempts)
×
UNCOV
3322

×
UNCOV
3323
                        // We've completed at least one iteration and haven't
×
UNCOV
3324
                        // finished, so we'll start to insert a delay period
×
UNCOV
3325
                        // between each attempt.
×
UNCOV
3326
                        delaySignal = time.After(delayTime)
×
UNCOV
3327
                        select {
×
UNCOV
3328
                        case <-delaySignal:
×
3329
                        case <-s.quit:
×
3330
                                return
×
3331
                        }
3332

3333
                        // After our delay, we'll double the time we wait up to
3334
                        // the max back off period.
UNCOV
3335
                        delayTime *= 2
×
UNCOV
3336
                        if delayTime > backOffCeiling {
×
3337
                                delayTime = backOffCeiling
×
3338
                        }
×
3339
                }
3340

3341
                // Otherwise, we'll request the remaining number of peers
3342
                // in order to reach our target.
3343
                peersNeeded := numTargetPeers - numActivePeers
3✔
3344
                bootstrapAddrs, err := discovery.MultiSourceBootstrap(
3✔
3345
                        ctx, ignore, peersNeeded, bootstrappers...,
3✔
3346
                )
3✔
3347
                if err != nil {
3✔
UNCOV
3348
                        srvrLog.Errorf("Unable to retrieve initial bootstrap "+
×
UNCOV
3349
                                "peers: %v", err)
×
UNCOV
3350
                        continue
×
3351
                }
3352

3353
                // Then, we'll attempt to establish a connection to the
3354
                // different peer addresses retrieved by our bootstrappers.
3355
                var wg sync.WaitGroup
3✔
3356
                for _, bootstrapAddr := range bootstrapAddrs {
6✔
3357
                        wg.Add(1)
3✔
3358
                        go func(addr *lnwire.NetAddress) {
6✔
3359
                                defer wg.Done()
3✔
3360

3✔
3361
                                errChan := make(chan error, 1)
3✔
3362
                                go s.connectToPeer(
3✔
3363
                                        addr, errChan, s.cfg.ConnectionTimeout,
3✔
3364
                                )
3✔
3365

3✔
3366
                                // We'll only allow this connection attempt to
3✔
3367
                                // take up to 3 seconds. This allows us to move
3✔
3368
                                // quickly by discarding peers that are slowing
3✔
3369
                                // us down.
3✔
3370
                                select {
3✔
3371
                                case err := <-errChan:
3✔
3372
                                        if err == nil {
6✔
3373
                                                return
3✔
3374
                                        }
3✔
3375
                                        srvrLog.Errorf("Unable to connect to "+
×
3376
                                                "%v: %v", addr, err)
×
3377
                                // TODO: tune timeout? 3 seconds might be *too*
3378
                                // aggressive but works well.
3379
                                case <-time.After(3 * time.Second):
×
3380
                                        srvrLog.Tracef("Skipping peer %v due "+
×
3381
                                                "to not establishing a "+
×
3382
                                                "connection within 3 seconds",
×
3383
                                                addr)
×
3384
                                case <-s.quit:
×
3385
                                }
3386
                        }(bootstrapAddr)
3387
                }
3388

3389
                wg.Wait()
3✔
3390
        }
3391
}
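
// Editor's note: the retry loop above is a standard capped exponential
// backoff. A minimal, self-contained sketch of the same pattern follows;
// the function name and the 1s/32s constants are illustrative, not lnd's
// actual values.
func exampleCappedBackoff(quit chan struct{}) {
	delayTime := time.Second
	backOffCeiling := 32 * time.Second

	for {
		// ... attempt the operation here, returning on success ...

		// Wait out the current delay while remaining responsive to
		// shutdown.
		select {
		case <-time.After(delayTime):
		case <-quit:
			return
		}

		// Double the delay for the next round, clamping at the
		// ceiling: 1s, 2s, 4s, ..., 32s, 32s, ...
		delayTime *= 2
		if delayTime > backOffCeiling {
			delayTime = backOffCeiling
		}
	}
}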

// createNewHiddenService automatically sets up a v2 or v3 onion service in
// order to listen for inbound connections over Tor.
func (s *server) createNewHiddenService(ctx context.Context) error {
	// Determine the different ports the server is listening on. The onion
	// service's virtual port will map to these ports and one will be picked
	// at random when the onion service is being accessed.
	listenPorts := make([]int, 0, len(s.listenAddrs))
	for _, listenAddr := range s.listenAddrs {
		port := listenAddr.(*net.TCPAddr).Port
		listenPorts = append(listenPorts, port)
	}

	encrypter, err := lnencrypt.KeyRingEncrypter(s.cc.KeyRing)
	if err != nil {
		return err
	}

	// Once the port mapping has been set, we can go ahead and automatically
	// create our onion service. The service's private key will be saved to
	// disk in order to regain access to this service when restarting `lnd`.
	onionCfg := tor.AddOnionConfig{
		VirtualPort: defaultPeerPort,
		TargetPorts: listenPorts,
		Store: tor.NewOnionFile(
			s.cfg.Tor.PrivateKeyPath, 0600, s.cfg.Tor.EncryptKey,
			encrypter,
		),
	}

	switch {
	case s.cfg.Tor.V2:
		onionCfg.Type = tor.V2
	case s.cfg.Tor.V3:
		onionCfg.Type = tor.V3
	}

	addr, err := s.torController.AddOnion(onionCfg)
	if err != nil {
		return err
	}

	// Now that the onion service has been created, we'll add the onion
	// address it can be reached at to our list of advertised addresses.
	newNodeAnn, err := s.genNodeAnnouncement(
		nil, func(currentAnn *lnwire.NodeAnnouncement) {
			currentAnn.Addresses = append(currentAnn.Addresses, addr)
		},
	)
	if err != nil {
		return fmt.Errorf("unable to generate new node "+
			"announcement: %v", err)
	}

	// Finally, we'll update the on-disk version of our announcement so it
	// will eventually propagate to nodes in the network.
	selfNode := &models.LightningNode{
		HaveNodeAnnouncement: true,
		LastUpdate:           time.Unix(int64(newNodeAnn.Timestamp), 0),
		Addresses:            newNodeAnn.Addresses,
		Alias:                newNodeAnn.Alias.String(),
		Features: lnwire.NewFeatureVector(
			newNodeAnn.Features, lnwire.Features,
		),
		Color:        newNodeAnn.RGBColor,
		AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(),
	}
	copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed())
	if err := s.graphDB.SetSourceNode(ctx, selfNode); err != nil {
		return fmt.Errorf("can't set self node: %w", err)
	}

	return nil
}

// findChannel finds a channel given a public key and ChannelID. It is an
// optimization that is quicker than searching for a channel given only the
// ChannelID.
func (s *server) findChannel(node *btcec.PublicKey, chanID lnwire.ChannelID) (
	*channeldb.OpenChannel, error) {

	nodeChans, err := s.chanStateDB.FetchOpenChannels(node)
	if err != nil {
		return nil, err
	}

	for _, channel := range nodeChans {
		if chanID.IsChanPoint(&channel.FundingOutpoint) {
			return channel, nil
		}
	}

	return nil, fmt.Errorf("unable to find channel")
}

// getNodeAnnouncement fetches the current, fully signed node announcement.
func (s *server) getNodeAnnouncement() lnwire.NodeAnnouncement {
	s.mu.Lock()
	defer s.mu.Unlock()

	return *s.currentNodeAnn
}

// genNodeAnnouncement generates and returns the current fully signed node
// announcement. The timestamp of the announcement will be updated in order
// to ensure it propagates through the network.
func (s *server) genNodeAnnouncement(features *lnwire.RawFeatureVector,
	modifiers ...netann.NodeAnnModifier) (lnwire.NodeAnnouncement, error) {

	s.mu.Lock()
	defer s.mu.Unlock()

	// Create a shallow copy of the current node announcement to work on.
	// This ensures the original announcement remains unchanged
	// until the new announcement is fully signed and valid.
	newNodeAnn := *s.currentNodeAnn

	// First, try to update our feature manager with the updated set of
	// features.
	if features != nil {
		proposedFeatures := map[feature.Set]*lnwire.RawFeatureVector{
			feature.SetNodeAnn: features,
		}
		err := s.featureMgr.UpdateFeatureSets(proposedFeatures)
		if err != nil {
			return lnwire.NodeAnnouncement{}, err
		}

		// If we could successfully update our feature manager, add
		// an update modifier to include these new features to our
		// set.
		modifiers = append(
			modifiers, netann.NodeAnnSetFeatures(features),
		)
	}

	// Always update the timestamp when refreshing to ensure the update
	// propagates.
	modifiers = append(modifiers, netann.NodeAnnSetTimestamp)

	// Apply the requested changes to the node announcement.
	for _, modifier := range modifiers {
		modifier(&newNodeAnn)
	}

	// Sign a new update after applying all of the passed modifiers.
	err := netann.SignNodeAnnouncement(
		s.nodeSigner, s.identityKeyLoc, &newNodeAnn,
	)
	if err != nil {
		return lnwire.NodeAnnouncement{}, err
	}

	// If signing succeeds, update the current announcement.
	*s.currentNodeAnn = newNodeAnn

	return *s.currentNodeAnn, nil
}
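
// Editor's note: a netann.NodeAnnModifier is simply a closure that mutates
// the announcement copy before it is re-signed, as the Tor address example
// in createNewHiddenService shows. A hedged sketch of a caller-supplied
// modifier (the color value and function name are illustrative):
func exampleColorModifier() netann.NodeAnnModifier {
	return func(ann *lnwire.NodeAnnouncement) {
		// Only the shallow copy handed to the modifier is mutated;
		// genNodeAnnouncement commits the change after signing
		// succeeds.
		ann.RGBColor.R = 0x33
		ann.RGBColor.G = 0x99
		ann.RGBColor.B = 0xff
	}
}

// It would be applied via s.genNodeAnnouncement(nil, exampleColorModifier()).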

// updateAndBroadcastSelfNode generates a new node announcement, applying the
// given modifiers and updating the timestamp to ensure it propagates through
// the network. Then it broadcasts it to the network.
func (s *server) updateAndBroadcastSelfNode(ctx context.Context,
	features *lnwire.RawFeatureVector,
	modifiers ...netann.NodeAnnModifier) error {

	newNodeAnn, err := s.genNodeAnnouncement(features, modifiers...)
	if err != nil {
		return fmt.Errorf("unable to generate new node "+
			"announcement: %v", err)
	}

	// Update the on-disk version of our announcement.
	// Load and modify the self node instead of creating a new instance so
	// we don't risk overwriting any existing values.
	selfNode, err := s.graphDB.SourceNode(ctx)
	if err != nil {
		return fmt.Errorf("unable to get current source node: %w", err)
	}

	selfNode.HaveNodeAnnouncement = true
	selfNode.LastUpdate = time.Unix(int64(newNodeAnn.Timestamp), 0)
	selfNode.Addresses = newNodeAnn.Addresses
	selfNode.Alias = newNodeAnn.Alias.String()
	selfNode.Features = s.featureMgr.Get(feature.SetNodeAnn)
	selfNode.Color = newNodeAnn.RGBColor
	selfNode.AuthSigBytes = newNodeAnn.Signature.ToSignatureBytes()

	copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed())

	if err := s.graphDB.SetSourceNode(ctx, selfNode); err != nil {
		return fmt.Errorf("can't set self node: %w", err)
	}

	// Finally, propagate it to the nodes in the network.
	err = s.BroadcastMessage(nil, &newNodeAnn)
	if err != nil {
		rpcsLog.Debugf("Unable to broadcast new node "+
			"announcement to peers: %v", err)
		return err
	}

	return nil
}

type nodeAddresses struct {
	pubKey    *btcec.PublicKey
	addresses []net.Addr
}

// establishPersistentConnections attempts to establish persistent connections
// to all our direct channel collaborators. In order to promote liveness of our
// active channels, we instruct the connection manager to attempt to establish
// and maintain persistent connections to all our direct channel counterparties.
func (s *server) establishPersistentConnections(ctx context.Context) error {
	// nodeAddrsMap stores the combination of node public keys and addresses
	// that we'll attempt to reconnect to. PubKey strings are used as keys
	// since other PubKey forms can't be compared.
	nodeAddrsMap := make(map[string]*nodeAddresses)

	// Iterate through the list of LinkNodes to find addresses we should
	// attempt to connect to based on our set of previous connections. Set
	// the reconnection port to the default peer port.
	linkNodes, err := s.chanStateDB.LinkNodeDB().FetchAllLinkNodes()
	if err != nil && !errors.Is(err, channeldb.ErrLinkNodesNotFound) {
		return fmt.Errorf("failed to fetch all link nodes: %w", err)
	}

	for _, node := range linkNodes {
		pubStr := string(node.IdentityPub.SerializeCompressed())
		nodeAddrs := &nodeAddresses{
			pubKey:    node.IdentityPub,
			addresses: node.Addresses,
		}
		nodeAddrsMap[pubStr] = nodeAddrs
	}

	// After checking our previous connections for addresses to connect to,
	// iterate through the nodes in our channel graph to find addresses
	// that have been added via NodeAnnouncement messages.
	// TODO(roasbeef): instead iterate over link nodes and query graph for
	// each of the nodes.
	graphAddrs := make(map[string]*nodeAddresses)
	forEachSrcNodeChan := func(chanPoint wire.OutPoint,
		havePolicy bool, channelPeer *models.LightningNode) error {

		// If the remote party has announced the channel to us, but we
		// haven't yet, then we won't have a policy. However, we don't
		// need this to connect to the peer, so we'll log it and move on.
		if !havePolicy {
			srvrLog.Warnf("No channel policy found for "+
				"ChannelPoint(%v)", chanPoint)
		}

		pubStr := string(channelPeer.PubKeyBytes[:])

		// Add all unique addresses from channel
		// graph/NodeAnnouncements to the list of addresses we'll
		// connect to for this peer.
		addrSet := make(map[string]net.Addr)
		for _, addr := range channelPeer.Addresses {
			switch addr.(type) {
			case *net.TCPAddr:
				addrSet[addr.String()] = addr

			// We'll only attempt to connect to Tor addresses if Tor
			// outbound support is enabled.
			case *tor.OnionAddr:
				if s.cfg.Tor.Active {
					addrSet[addr.String()] = addr
				}
			}
		}

		// If this peer is also recorded as a link node, we'll add any
		// additional addresses that have not already been selected.
		linkNodeAddrs, ok := nodeAddrsMap[pubStr]
		if ok {
			for _, lnAddress := range linkNodeAddrs.addresses {
				switch lnAddress.(type) {
				case *net.TCPAddr:
					addrSet[lnAddress.String()] = lnAddress

				// We'll only attempt to connect to Tor
				// addresses if Tor outbound support is enabled.
				case *tor.OnionAddr:
					if s.cfg.Tor.Active {
						//nolint:ll
						addrSet[lnAddress.String()] = lnAddress
					}
				}
			}
		}

		// Construct a slice of the deduped addresses.
		var addrs []net.Addr
		for _, addr := range addrSet {
			addrs = append(addrs, addr)
		}

		n := &nodeAddresses{
			addresses: addrs,
		}
		n.pubKey, err = channelPeer.PubKey()
		if err != nil {
			return err
		}

		graphAddrs[pubStr] = n
		return nil
	}
	err = s.graphDB.ForEachSourceNodeChannel(
		ctx, forEachSrcNodeChan, func() {
			clear(graphAddrs)
		},
	)
	if err != nil {
		srvrLog.Errorf("Failed to iterate over source node channels: "+
			"%v", err)

		if !errors.Is(err, graphdb.ErrGraphNoEdgesFound) &&
			!errors.Is(err, graphdb.ErrEdgeNotFound) {

			return err
		}
	}

	// Combine the addresses from the link nodes and the channel graph.
	for pubStr, nodeAddr := range graphAddrs {
		nodeAddrsMap[pubStr] = nodeAddr
	}

	srvrLog.Debugf("Establishing %v persistent connections on start",
		len(nodeAddrsMap))

	// Acquire and hold server lock until all persistent connection requests
	// have been recorded and sent to the connection manager.
	s.mu.Lock()
	defer s.mu.Unlock()

	// Iterate through the combined list of addresses from prior links and
	// node announcements and attempt to reconnect to each node.
	var numOutboundConns int
	for pubStr, nodeAddr := range nodeAddrsMap {
		// Add this peer to the set of peers we should maintain a
		// persistent connection with. We set the value to false to
		// indicate that we should not continue to reconnect if the
		// number of channels returns to zero, since this peer has not
		// been requested as perm by the user.
		s.persistentPeers[pubStr] = false
		if _, ok := s.persistentPeersBackoff[pubStr]; !ok {
			s.persistentPeersBackoff[pubStr] = s.cfg.MinBackoff
		}

		for _, address := range nodeAddr.addresses {
			// Create a wrapper address which couples the IP and
			// the pubkey so the brontide authenticated connection
			// can be established.
			lnAddr := &lnwire.NetAddress{
				IdentityKey: nodeAddr.pubKey,
				Address:     address,
			}

			s.persistentPeerAddrs[pubStr] = append(
				s.persistentPeerAddrs[pubStr], lnAddr)
		}

		// We'll connect to the first 10 peers immediately, then
		// randomly stagger any remaining connections if the
		// stagger initial reconnect flag is set. This ensures
		// that mobile nodes or nodes with a small number of
		// channels obtain connectivity quickly, but larger
		// nodes are able to disperse the costs of connecting to
		// all peers at once.
		if numOutboundConns < numInstantInitReconnect ||
			!s.cfg.StaggerInitialReconnect {

			go s.connectToPersistentPeer(pubStr)
		} else {
			go s.delayInitialReconnect(pubStr)
		}

		numOutboundConns++
	}

	return nil
}
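
// Editor's note: the merging logic above dedupes addresses by keying a map
// on net.Addr.String(), so the same host:port learned from both a link node
// and the graph is only dialed once. A minimal sketch of that pattern (the
// function name is illustrative):
func exampleDedupeAddrs(addrs []net.Addr) []net.Addr {
	addrSet := make(map[string]net.Addr)
	for _, addr := range addrs {
		addrSet[addr.String()] = addr
	}

	deduped := make([]net.Addr, 0, len(addrSet))
	for _, addr := range addrSet {
		deduped = append(deduped, addr)
	}

	return deduped
}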

// delayInitialReconnect will attempt a reconnection to the given peer after
// sampling a value for the delay between 0s and the maxInitReconnectDelay.
//
// NOTE: This method MUST be run as a goroutine.
func (s *server) delayInitialReconnect(pubStr string) {
	delay := time.Duration(prand.Intn(maxInitReconnectDelay)) * time.Second
	select {
	case <-time.After(delay):
		s.connectToPersistentPeer(pubStr)
	case <-s.quit:
	}
}

// prunePersistentPeerConnection removes all internal state related to
// persistent connections to a peer within the server. This is used to avoid
// persistent connection retries to peers we do not have any open channels with.
func (s *server) prunePersistentPeerConnection(compressedPubKey [33]byte) {
	pubKeyStr := string(compressedPubKey[:])

	s.mu.Lock()
	if perm, ok := s.persistentPeers[pubKeyStr]; ok && !perm {
		delete(s.persistentPeers, pubKeyStr)
		delete(s.persistentPeersBackoff, pubKeyStr)
		delete(s.persistentPeerAddrs, pubKeyStr)
		s.cancelConnReqs(pubKeyStr, nil)
		s.mu.Unlock()

		srvrLog.Infof("Pruned peer %x from persistent connections, "+
			"peer has no open channels", compressedPubKey)

		return
	}
	s.mu.Unlock()
}

// bannedPersistentPeerConnection does not actually "ban" a persistent peer. It
// is instead used to remove persistent peer state for a peer that has been
// disconnected for good cause by the server. Currently, a gossip ban from
// sending garbage and the server running out of restricted-access
// (i.e. "free") connection slots are the only way this logic gets hit. In the
// future, this function may expand when more ban criteria are added.
//
// NOTE: The server's write lock MUST be held when this is called.
func (s *server) bannedPersistentPeerConnection(remotePub string) {
	if perm, ok := s.persistentPeers[remotePub]; ok && !perm {
		delete(s.persistentPeers, remotePub)
		delete(s.persistentPeersBackoff, remotePub)
		delete(s.persistentPeerAddrs, remotePub)
		s.cancelConnReqs(remotePub, nil)
	}
}

// BroadcastMessage sends a request to the server to broadcast a set of
// messages to all peers other than the one specified by the `skips` parameter.
// All messages sent via BroadcastMessage will be queued for lazy delivery to
// the target peers.
//
// NOTE: This function is safe for concurrent access.
func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
	msgs ...lnwire.Message) error {

	// Filter out peers found in the skips map. We synchronize access to
	// peersByPub throughout this process to ensure we deliver messages to
	// the exact set of peers present at the time of invocation.
	s.mu.RLock()
	peers := make([]*peer.Brontide, 0, len(s.peersByPub))
	for pubStr, sPeer := range s.peersByPub {
		if skips != nil {
			if _, ok := skips[sPeer.PubKey()]; ok {
				srvrLog.Tracef("Skipping %x in broadcast with "+
					"pubStr=%x", sPeer.PubKey(), pubStr)
				continue
			}
		}

		peers = append(peers, sPeer)
	}
	s.mu.RUnlock()

	// Iterate over all known peers, dispatching a goroutine to enqueue
	// all messages to each of the peers.
	var wg sync.WaitGroup
	for _, sPeer := range peers {
		srvrLog.Debugf("Sending %v messages to peer %x", len(msgs),
			sPeer.PubKey())

		// Dispatch a goroutine to enqueue all messages to this peer.
		wg.Add(1)
		s.wg.Add(1)
		go func(p lnpeer.Peer) {
			defer s.wg.Done()
			defer wg.Done()

			p.SendMessageLazy(false, msgs...)
		}(sPeer)
	}

	// Wait for all messages to have been dispatched before returning to
	// caller.
	wg.Wait()

	return nil
}
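
// Editor's note: a hedged usage sketch for BroadcastMessage. The skips set is
// keyed by route.Vertex, so a caller excluding a single peer builds a
// one-entry map (the function and variable names are illustrative):
func exampleBroadcastExceptOne(s *server, skipVertex route.Vertex,
	msg lnwire.Message) error {

	skips := map[route.Vertex]struct{}{
		skipVertex: {},
	}

	// Every other connected peer receives msg via lazy delivery.
	return s.BroadcastMessage(skips, msg)
}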

// NotifyWhenOnline can be called by other subsystems to get notified when a
// particular peer comes online. The peer itself is sent across the peerChan.
//
// NOTE: This function is safe for concurrent access.
func (s *server) NotifyWhenOnline(peerKey [33]byte,
	peerChan chan<- lnpeer.Peer) {

	s.mu.Lock()

	// Compute the target peer's identifier.
	pubStr := string(peerKey[:])

	// Check if peer is connected.
	peer, ok := s.peersByPub[pubStr]
	if ok {
		// Unlock here so that the mutex isn't held while we are
		// waiting for the peer to become active.
		s.mu.Unlock()

		// Wait until the peer signals that it is actually active
		// rather than only in the server's maps.
		select {
		case <-peer.ActiveSignal():
		case <-peer.QuitSignal():
			// The peer quit, so we'll add the channel to the slice
			// and return.
			s.mu.Lock()
			s.peerConnectedListeners[pubStr] = append(
				s.peerConnectedListeners[pubStr], peerChan,
			)
			s.mu.Unlock()
			return
		}

		// Connected, can return early.
		srvrLog.Debugf("Notifying that peer %x is online", peerKey)

		select {
		case peerChan <- peer:
		case <-s.quit:
		}

		return
	}

	// Not connected, store this listener such that it can be notified when
	// the peer comes online.
	s.peerConnectedListeners[pubStr] = append(
		s.peerConnectedListeners[pubStr], peerChan,
	)
	s.mu.Unlock()
}
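
// Editor's note: a minimal usage sketch for NotifyWhenOnline; the function
// name is illustrative. Registration is fire-and-forget, and the peer is
// delivered on the supplied channel only once it is fully active.
func exampleWaitForPeerOnline(s *server, peerKey [33]byte) lnpeer.Peer {
	peerChan := make(chan lnpeer.Peer, 1)
	s.NotifyWhenOnline(peerKey, peerChan)

	// Blocks until the peer has connected and completed its handshake.
	return <-peerChan
}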

// NotifyWhenOffline delivers a notification to the caller of when the peer with
// the given public key has been disconnected. The notification is signaled by
// closing the channel returned.
func (s *server) NotifyWhenOffline(peerPubKey [33]byte) <-chan struct{} {
	s.mu.Lock()
	defer s.mu.Unlock()

	c := make(chan struct{})

	// If the peer is already offline, we can immediately trigger the
	// notification.
	peerPubKeyStr := string(peerPubKey[:])
	if _, ok := s.peersByPub[peerPubKeyStr]; !ok {
		srvrLog.Debugf("Notifying that peer %x is offline", peerPubKey)
		close(c)
		return c
	}

	// Otherwise, the peer is online, so we'll keep track of the channel to
	// trigger the notification once the server detects the peer
	// disconnects.
	s.peerDisconnectedListeners[peerPubKeyStr] = append(
		s.peerDisconnectedListeners[peerPubKeyStr], c,
	)

	return c
}
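
// Editor's note: in contrast to NotifyWhenOnline, the offline notification is
// signalled by closing the returned channel, so a caller just receives from
// it. A hedged sketch (the function name is illustrative):
func exampleWaitForPeerOffline(s *server, peerKey [33]byte) {
	// Unblocks immediately if the peer is already offline, or later once
	// the server detects the disconnect.
	<-s.NotifyWhenOffline(peerKey)
}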

// FindPeer will return the peer that corresponds to the passed in public key.
// This function is used by the funding manager, allowing it to update the
// daemon's local representation of the remote peer.
//
// NOTE: This function is safe for concurrent access.
func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer.Brontide, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	pubStr := string(peerKey.SerializeCompressed())

	return s.findPeerByPubStr(pubStr)
}

// FindPeerByPubStr will return the peer that corresponds to the passed peerID,
// which should be a string representation of the peer's serialized, compressed
// public key.
//
// NOTE: This function is safe for concurrent access.
func (s *server) FindPeerByPubStr(pubStr string) (*peer.Brontide, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	return s.findPeerByPubStr(pubStr)
}

// findPeerByPubStr is an internal method that retrieves the specified peer from
// the server's internal state using the peer's public key string.
func (s *server) findPeerByPubStr(pubStr string) (*peer.Brontide, error) {
	peer, ok := s.peersByPub[pubStr]
	if !ok {
		return nil, ErrPeerNotConnected
	}

	return peer, nil
}

// nextPeerBackoff computes the next backoff duration for a peer's pubkey using
// exponential backoff. If no previous backoff was known, the default is
// returned.
func (s *server) nextPeerBackoff(pubStr string,
	startTime time.Time) time.Duration {

	// Now, determine the appropriate backoff to use for the retry.
	backoff, ok := s.persistentPeersBackoff[pubStr]
	if !ok {
		// If an existing backoff was unknown, use the default.
		return s.cfg.MinBackoff
	}

	// If the peer failed to start properly, we'll just use the previous
	// backoff to compute the subsequent randomized exponential backoff
	// duration. This will roughly double on average.
	if startTime.IsZero() {
		return computeNextBackoff(backoff, s.cfg.MaxBackoff)
	}

	// The peer succeeded in starting. If the connection didn't last long
	// enough to be considered stable, we'll continue to back off retries
	// with this peer.
	connDuration := time.Since(startTime)
	if connDuration < defaultStableConnDuration {
		return computeNextBackoff(backoff, s.cfg.MaxBackoff)
	}

	// The peer succeeded in starting and this was a stable peer, so we'll
	// reduce the timeout duration by the length of the connection after
	// applying randomized exponential backoff. We'll only apply this in the
	// case that:
	//   reb(curBackoff) - connDuration > cfg.MinBackoff
	relaxedBackoff := computeNextBackoff(backoff, s.cfg.MaxBackoff) - connDuration
	if relaxedBackoff > s.cfg.MinBackoff {
		return relaxedBackoff
	}

	// Lastly, if reb(currBackoff) - connDuration <= cfg.MinBackoff, the
	// stable connection lasted much longer than our previous backoff. To
	// reward such good behavior, we'll reconnect after the default
	// timeout.
	return s.cfg.MinBackoff
}
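
// Editor's note: an illustrative sketch of the doubling behavior discussed
// above, assuming computeNextBackoff (defined elsewhere in this file) keeps
// its randomized truncated-exponential semantics; the loop bound and function
// name are arbitrary.
func exampleBackoffDoubling(s *server) {
	// Starting from MinBackoff, each failed retry roughly doubles the
	// wait (with jitter), saturating at MaxBackoff.
	backoff := s.cfg.MinBackoff
	for i := 0; i < 5; i++ {
		backoff = computeNextBackoff(backoff, s.cfg.MaxBackoff)
		srvrLog.Debugf("Retry %d would wait %v", i+1, backoff)
	}
}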

// shouldDropLocalConnection determines if our local connection to a remote peer
// should be dropped in the case of concurrent connection establishment. In
// order to deterministically decide which connection should be dropped, we'll
// utilize the ordering of the local and remote public key. If we didn't use
// such a tie breaker, then we risk _both_ connections erroneously being
// dropped.
func shouldDropLocalConnection(local, remote *btcec.PublicKey) bool {
	localPubBytes := local.SerializeCompressed()
	remotePubBytes := remote.SerializeCompressed()

	// The connection that comes from the node with a "smaller" pubkey
	// should be kept. Therefore, if our pubkey is "greater" than theirs, we
	// should drop our established connection.
	return bytes.Compare(localPubBytes, remotePubBytes) > 0
}
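
// Editor's note: both endpoints evaluate shouldDropLocalConnection with the
// arguments swapped, so for distinct keys exactly one side observes true and
// drops its connection. A hedged sketch of that symmetry (names are
// illustrative):
func exampleTieBreakerSymmetry(keyA, keyB *btcec.PublicKey) {
	aDrops := shouldDropLocalConnection(keyA, keyB)
	bDrops := shouldDropLocalConnection(keyB, keyA)

	// Unless the keys are identical, aDrops != bDrops, which is what
	// prevents both duplicate connections from being torn down.
	srvrLog.Debugf("aDrops=%v, bDrops=%v", aDrops, bDrops)
}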

// InboundPeerConnected initializes a new peer in response to a new inbound
// connection.
//
// NOTE: This function is safe for concurrent access.
func (s *server) InboundPeerConnected(conn net.Conn) {
	// Exit early if we have already been instructed to shutdown, this
	// prevents any delayed callbacks from accidentally registering peers.
	if s.Stopped() {
		return
	}

	nodePub := conn.(*brontide.Conn).RemotePub()
	pubSer := nodePub.SerializeCompressed()
	pubStr := string(pubSer)

	var pubBytes [33]byte
	copy(pubBytes[:], pubSer)

	s.mu.Lock()
	defer s.mu.Unlock()

	// If we already have an outbound connection to this peer, then ignore
	// this new connection.
	if p, ok := s.outboundPeers[pubStr]; ok {
		srvrLog.Debugf("Already have outbound connection for %v, "+
			"ignoring inbound connection from local=%v, remote=%v",
			p, conn.LocalAddr(), conn.RemoteAddr())

		conn.Close()
		return
	}

	// If we already have a valid connection that is scheduled to take
	// precedence once the prior peer has finished disconnecting, we'll
	// ignore this connection.
	if p, ok := s.scheduledPeerConnection[pubStr]; ok {
		srvrLog.Debugf("Ignoring connection from %v, peer %v already "+
			"scheduled", conn.RemoteAddr(), p)
		conn.Close()
		return
	}

	srvrLog.Infof("New inbound connection from %v", conn.RemoteAddr())

	// Check to see if we already have a connection with this peer. If so,
	// we may need to drop our existing connection. This prevents us from
	// having duplicate connections to the same peer. We forgo adding a
	// default case as we expect these to be the only error values returned
	// from findPeerByPubStr.
	connectedPeer, err := s.findPeerByPubStr(pubStr)
	switch err {
	case ErrPeerNotConnected:
		// We were unable to locate an existing connection with the
		// target peer, proceed to connect.
		s.cancelConnReqs(pubStr, nil)
		s.peerConnected(conn, nil, true)

	case nil:
		ctx := btclog.WithCtx(
			context.TODO(),
			lnutils.LogPubKey("peer", connectedPeer.IdentityKey()),
		)

		// We already have a connection with the incoming peer. If the
		// connection we've already established should be kept and is
		// not of the same type of the new connection (inbound), then
		// we'll close out the new connection s.t there's only a single
		// connection between us.
		localPub := s.identityECDH.PubKey()
		if !connectedPeer.Inbound() &&
			!shouldDropLocalConnection(localPub, nodePub) {

			srvrLog.WarnS(ctx, "Received inbound connection from "+
				"peer, but already have outbound "+
				"connection, dropping conn",
				fmt.Errorf("already have outbound conn"))
			conn.Close()
			return
		}

		// Otherwise, if we should drop the connection, then we'll
		// disconnect our already connected peer.
		srvrLog.DebugS(ctx, "Disconnecting stale connection")

		s.cancelConnReqs(pubStr, nil)

		// Remove the current peer from the server's internal state and
		// signal that the peer termination watcher does not need to
		// execute for this peer.
		s.removePeerUnsafe(ctx, connectedPeer)
		s.ignorePeerTermination[connectedPeer] = struct{}{}
		s.scheduledPeerConnection[pubStr] = func() {
			s.peerConnected(conn, nil, true)
		}
	}
}

// OutboundPeerConnected initializes a new peer in response to a new outbound
// connection.
//
// NOTE: This function is safe for concurrent access.
func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) {
	// Exit early if we have already been instructed to shutdown, this
	// prevents any delayed callbacks from accidentally registering peers.
	if s.Stopped() {
		return
	}

	nodePub := conn.(*brontide.Conn).RemotePub()
	pubSer := nodePub.SerializeCompressed()
	pubStr := string(pubSer)

	var pubBytes [33]byte
	copy(pubBytes[:], pubSer)

	s.mu.Lock()
	defer s.mu.Unlock()

	// If we already have an inbound connection to this peer, then ignore
	// this new connection.
	if p, ok := s.inboundPeers[pubStr]; ok {
		srvrLog.Debugf("Already have inbound connection for %v, "+
			"ignoring outbound connection from local=%v, remote=%v",
			p, conn.LocalAddr(), conn.RemoteAddr())

		if connReq != nil {
			s.connMgr.Remove(connReq.ID())
		}
		conn.Close()
		return
	}
	if _, ok := s.persistentConnReqs[pubStr]; !ok && connReq != nil {
		srvrLog.Debugf("Ignoring canceled outbound connection")
		s.connMgr.Remove(connReq.ID())
		conn.Close()
		return
	}

	// If we already have a valid connection that is scheduled to take
	// precedence once the prior peer has finished disconnecting, we'll
	// ignore this connection.
	if _, ok := s.scheduledPeerConnection[pubStr]; ok {
		srvrLog.Debugf("Ignoring connection, peer already scheduled")

		if connReq != nil {
			s.connMgr.Remove(connReq.ID())
		}

		conn.Close()
		return
	}

	srvrLog.Infof("Established outbound connection to: %x@%v", pubStr,
		conn.RemoteAddr())

	if connReq != nil {
		// A successful connection was returned by the connmgr.
		// Immediately cancel all pending requests, excluding the
		// outbound connection we just established.
		ignore := connReq.ID()
		s.cancelConnReqs(pubStr, &ignore)
	} else {
		// This was a successful connection made by some other
		// subsystem. Remove all requests being managed by the connmgr.
		s.cancelConnReqs(pubStr, nil)
	}

	// If we already have a connection with this peer, decide whether or not
	// we need to drop the stale connection. We forgo adding a default case
	// as we expect these to be the only error values returned from
	// findPeerByPubStr.
	connectedPeer, err := s.findPeerByPubStr(pubStr)
	switch err {
	case ErrPeerNotConnected:
		// We were unable to locate an existing connection with the
		// target peer, proceed to connect.
		s.peerConnected(conn, connReq, false)

	case nil:
		ctx := btclog.WithCtx(
			context.TODO(),
			lnutils.LogPubKey("peer", connectedPeer.IdentityKey()),
		)

		// We already have a connection with the incoming peer. If the
		// connection we've already established should be kept and is
		// not of the same type of the new connection (outbound), then
		// we'll close out the new connection s.t there's only a single
		// connection between us.
		localPub := s.identityECDH.PubKey()
		if connectedPeer.Inbound() &&
			shouldDropLocalConnection(localPub, nodePub) {

			srvrLog.WarnS(ctx, "Established outbound connection "+
				"to peer, but already have inbound "+
				"connection, dropping conn",
				fmt.Errorf("already have inbound conn"))
			if connReq != nil {
				s.connMgr.Remove(connReq.ID())
			}
			conn.Close()
			return
		}

		// Otherwise, _their_ connection should be dropped. So we'll
		// disconnect the peer and send the now obsolete peer to the
		// server for garbage collection.
		srvrLog.DebugS(ctx, "Disconnecting stale connection")

		// Remove the current peer from the server's internal state and
		// signal that the peer termination watcher does not need to
		// execute for this peer.
		s.removePeerUnsafe(ctx, connectedPeer)
		s.ignorePeerTermination[connectedPeer] = struct{}{}
		s.scheduledPeerConnection[pubStr] = func() {
			s.peerConnected(conn, connReq, false)
		}
	}
}

// UnassignedConnID is the default connection ID that a request can have before
// it actually is submitted to the connmgr.
// TODO(conner): move into connmgr package, or better, add connmgr method for
// generating atomic IDs
const UnassignedConnID uint64 = 0

// cancelConnReqs stops all persistent connection requests for a given pubkey.
// Any attempts initiated by the peerTerminationWatcher are canceled first.
// Afterwards, each connection request is removed from the connmgr. The caller
// can optionally specify a connection ID to ignore, which prevents us from
// canceling a successful request. All persistent connreqs for the provided
// pubkey are discarded after the operation.
func (s *server) cancelConnReqs(pubStr string, skip *uint64) {
	// First, cancel any lingering persistent retry attempts, which will
	// prevent retries for any with backoffs that are still maturing.
	if cancelChan, ok := s.persistentRetryCancels[pubStr]; ok {
		close(cancelChan)
		delete(s.persistentRetryCancels, pubStr)
	}

	// Next, check to see if we have any outstanding persistent connection
	// requests to this peer. If so, then we'll remove all of these
	// connection requests, and also delete the entry from the map.
	connReqs, ok := s.persistentConnReqs[pubStr]
	if !ok {
		return
	}

	for _, connReq := range connReqs {
		srvrLog.Tracef("Canceling %s:", connReqs)

		// Atomically capture the current request identifier.
		connID := connReq.ID()

		// Skip any zero IDs, this indicates the request has not
		// yet been scheduled.
		if connID == UnassignedConnID {
			continue
		}

		// Skip a particular connection ID if instructed.
		if skip != nil && connID == *skip {
			continue
		}

		s.connMgr.Remove(connID)
	}

	delete(s.persistentConnReqs, pubStr)
}

// handleCustomMessage dispatches an incoming custom peer message to
// subscribers.
func (s *server) handleCustomMessage(peer [33]byte, msg *lnwire.Custom) error {
	srvrLog.Debugf("Custom message received: peer=%x, type=%d",
		peer, msg.Type)

	return s.customMessageServer.SendUpdate(&CustomMessage{
		Peer: peer,
		Msg:  msg,
	})
}

// SubscribeCustomMessages subscribes to a stream of incoming custom peer
// messages.
func (s *server) SubscribeCustomMessages() (*subscribe.Client, error) {
	return s.customMessageServer.Subscribe()
}
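
// Editor's note: a hedged consumer sketch for the custom-message stream,
// assuming the subscribe.Client delivers the *CustomMessage values sent via
// SendUpdate above on its Updates() channel; the function name is
// illustrative.
func exampleConsumeCustomMessages(s *server) error {
	client, err := s.SubscribeCustomMessages()
	if err != nil {
		return err
	}
	defer client.Cancel()

	for update := range client.Updates() {
		msg, ok := update.(*CustomMessage)
		if !ok {
			continue
		}

		srvrLog.Debugf("Received custom message type=%d from "+
			"peer=%x", msg.Msg.Type, msg.Peer)
	}

	return nil
}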

// notifyOpenChannelPeerEvent updates the access manager's maps and then calls
// the channelNotifier's NotifyOpenChannelEvent.
func (s *server) notifyOpenChannelPeerEvent(op wire.OutPoint,
	remotePub *btcec.PublicKey) {

	// Call newOpenChan to update the access manager's maps for this peer.
	if err := s.peerAccessMan.newOpenChan(remotePub); err != nil {
		srvrLog.Errorf("Failed to update peer[%x] access status after "+
			"channel[%v] open", remotePub.SerializeCompressed(), op)
	}

	// Notify subscribers about this open channel event.
	s.channelNotifier.NotifyOpenChannelEvent(op)
}

// notifyPendingOpenChannelPeerEvent updates the access manager's maps and then
// calls the channelNotifier's NotifyPendingOpenChannelEvent.
func (s *server) notifyPendingOpenChannelPeerEvent(op wire.OutPoint,
	pendingChan *channeldb.OpenChannel, remotePub *btcec.PublicKey) {

	// Call newPendingOpenChan to update the access manager's maps for this
	// peer.
	if err := s.peerAccessMan.newPendingOpenChan(remotePub); err != nil {
		srvrLog.Errorf("Failed to update peer[%x] access status after "+
			"channel[%v] pending open",
			remotePub.SerializeCompressed(), op)
	}

	// Notify subscribers about this event.
	s.channelNotifier.NotifyPendingOpenChannelEvent(op, pendingChan)
}

// notifyFundingTimeoutPeerEvent updates the access manager's maps and then
// calls the channelNotifier's NotifyFundingTimeout.
func (s *server) notifyFundingTimeoutPeerEvent(op wire.OutPoint,
	remotePub *btcec.PublicKey) {

	// Call newPendingCloseChan to potentially demote the peer.
	err := s.peerAccessMan.newPendingCloseChan(remotePub)
	if err != nil {
		srvrLog.Errorf("Failed to update peer[%x] access status after "+
			"channel[%v] pending close",
			remotePub.SerializeCompressed(), op)
	}

	if errors.Is(err, ErrNoMoreRestrictedAccessSlots) {
		// If we encounter an error while attempting to disconnect the
		// peer, log the error.
		if dcErr := s.DisconnectPeer(remotePub); dcErr != nil {
			srvrLog.Errorf("Unable to disconnect peer: %v", dcErr)
		}
	}

	// Notify subscribers about this event.
	s.channelNotifier.NotifyFundingTimeout(op)
}

// peerConnected is a function that handles initializing a newly connected
// peer by adding it to the server's global list of all active peers, and
// starting all the goroutines the peer needs to function properly. The inbound
// boolean should be true if the peer initiated the connection to us.
func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
	inbound bool) {

	brontideConn := conn.(*brontide.Conn)
	addr := conn.RemoteAddr()
	pubKey := brontideConn.RemotePub()

	// Only restrict access for inbound connections, which means if the
	// remote node's public key is banned or the restricted slots are used
	// up, we will drop the connection.
	//
	// TODO(yy): Consider performing this check in
	// `peerAccessMan.addPeerAccess`.
	access, err := s.peerAccessMan.assignPeerPerms(pubKey)
	if inbound && err != nil {
		pubSer := pubKey.SerializeCompressed()

		// Clean up the persistent peer maps if we're dropping this
		// connection.
		s.bannedPersistentPeerConnection(string(pubSer))

		srvrLog.Debugf("Dropping connection for %x since we are out "+
			"of restricted-access connection slots: %v.", pubSer,
			err)

		conn.Close()

		return
	}

	srvrLog.Infof("Finalizing connection to %x@%s, inbound=%v",
		pubKey.SerializeCompressed(), addr, inbound)

	peerAddr := &lnwire.NetAddress{
		IdentityKey: pubKey,
		Address:     addr,
		ChainNet:    s.cfg.ActiveNetParams.Net,
	}

	// With the brontide connection established, we'll now craft the feature
	// vectors to advertise to the remote node.
	initFeatures := s.featureMgr.Get(feature.SetInit)
	legacyFeatures := s.featureMgr.Get(feature.SetLegacyGlobal)

	// Lookup past error caches for the peer in the server. If no buffer is
	// found, create a fresh buffer.
	pkStr := string(peerAddr.IdentityKey.SerializeCompressed())
	errBuffer, ok := s.peerErrors[pkStr]
	if !ok {
		var err error
		errBuffer, err = queue.NewCircularBuffer(peer.ErrorBufferSize)
		if err != nil {
			srvrLog.Errorf("unable to create peer %v", err)
			return
		}
	}

	// If we directly set the peer.Config TowerClient member to the
	// s.towerClientMgr then in the case that the s.towerClientMgr is nil,
	// the peer.Config's TowerClient member will not evaluate to nil even
	// though the underlying value is nil. To avoid this gotcha which can
	// cause a panic, we need to explicitly pass nil to the peer.Config's
	// TowerClient if needed.
	var towerClient wtclient.ClientManager
	if s.towerClientMgr != nil {
		towerClient = s.towerClientMgr
	}

4481
        thresholdSats := btcutil.Amount(s.cfg.MaxFeeExposure)
3✔
4482
        thresholdMSats := lnwire.NewMSatFromSatoshis(thresholdSats)
3✔
4483

3✔
4484
        // Now that we've established a connection, create a peer, and it to the
3✔
4485
        // set of currently active peers. Configure the peer with the incoming
3✔
4486
        // and outgoing broadcast deltas to prevent htlcs from being accepted or
3✔
4487
        // offered that would trigger channel closure. In case of outgoing
3✔
4488
        // htlcs, an extra block is added to prevent the channel from being
3✔
4489
        // closed when the htlc is outstanding and a new block comes in.
3✔
4490
        pCfg := peer.Config{
3✔
4491
                Conn:                    brontideConn,
3✔
4492
                ConnReq:                 connReq,
3✔
4493
                Addr:                    peerAddr,
3✔
4494
                Inbound:                 inbound,
3✔
4495
                Features:                initFeatures,
3✔
4496
                LegacyFeatures:          legacyFeatures,
3✔
4497
                OutgoingCltvRejectDelta: lncfg.DefaultOutgoingCltvRejectDelta,
3✔
4498
                ChanActiveTimeout:       s.cfg.ChanEnableTimeout,
3✔
4499
                ErrorBuffer:             errBuffer,
3✔
4500
                WritePool:               s.writePool,
3✔
4501
                ReadPool:                s.readPool,
3✔
4502
                Switch:                  s.htlcSwitch,
3✔
4503
                InterceptSwitch:         s.interceptableSwitch,
3✔
4504
                ChannelDB:               s.chanStateDB,
3✔
4505
                ChannelGraph:            s.graphDB,
3✔
4506
                ChainArb:                s.chainArb,
3✔
4507
                AuthGossiper:            s.authGossiper,
3✔
4508
                ChanStatusMgr:           s.chanStatusMgr,
3✔
4509
                ChainIO:                 s.cc.ChainIO,
3✔
4510
                FeeEstimator:            s.cc.FeeEstimator,
3✔
4511
                Signer:                  s.cc.Wallet.Cfg.Signer,
3✔
4512
                SigPool:                 s.sigPool,
3✔
4513
                Wallet:                  s.cc.Wallet,
3✔
4514
                ChainNotifier:           s.cc.ChainNotifier,
3✔
4515
                BestBlockView:           s.cc.BestBlockTracker,
3✔
4516
                RoutingPolicy:           s.cc.RoutingPolicy,
3✔
4517
                Sphinx:                  s.sphinx,
3✔
4518
                WitnessBeacon:           s.witnessBeacon,
3✔
4519
                Invoices:                s.invoices,
3✔
4520
                ChannelNotifier:         s.channelNotifier,
3✔
4521
                HtlcNotifier:            s.htlcNotifier,
3✔
4522
                TowerClient:             towerClient,
3✔
4523
                DisconnectPeer:          s.DisconnectPeer,
3✔
4524
                GenNodeAnnouncement: func(...netann.NodeAnnModifier) (
3✔
4525
                        lnwire.NodeAnnouncement, error) {
6✔
4526

3✔
4527
                        return s.genNodeAnnouncement(nil)
3✔
4528
                },
3✔
4529

4530
                PongBuf: s.pongBuf,
4531

4532
                PrunePersistentPeerConnection: s.prunePersistentPeerConnection,
4533

4534
                FetchLastChanUpdate: s.fetchLastChanUpdate(),
4535

4536
                FundingManager: s.fundingMgr,
4537

4538
                Hodl:                    s.cfg.Hodl,
4539
                UnsafeReplay:            s.cfg.UnsafeReplay,
4540
                MaxOutgoingCltvExpiry:   s.cfg.MaxOutgoingCltvExpiry,
4541
                MaxChannelFeeAllocation: s.cfg.MaxChannelFeeAllocation,
4542
                CoopCloseTargetConfs:    s.cfg.CoopCloseTargetConfs,
4543
                MaxAnchorsCommitFeeRate: chainfee.SatPerKVByte(
4544
                        s.cfg.MaxCommitFeeRateAnchors * 1000).FeePerKWeight(),
4545
                ChannelCommitInterval:  s.cfg.ChannelCommitInterval,
4546
                PendingCommitInterval:  s.cfg.PendingCommitInterval,
4547
                ChannelCommitBatchSize: s.cfg.ChannelCommitBatchSize,
4548
                HandleCustomMessage:    s.handleCustomMessage,
4549
                GetAliases:             s.aliasMgr.GetAliases,
4550
                RequestAlias:           s.aliasMgr.RequestAlias,
4551
                AddLocalAlias:          s.aliasMgr.AddLocalAlias,
4552
                DisallowRouteBlinding:  s.cfg.ProtocolOptions.NoRouteBlinding(),
4553
                DisallowQuiescence:     s.cfg.ProtocolOptions.NoQuiescence(),
4554
                QuiescenceTimeout:      s.cfg.Htlcswitch.QuiescenceTimeout,
4555
                MaxFeeExposure:         thresholdMSats,
4556
                Quit:                   s.quit,
4557
                AuxLeafStore:           s.implCfg.AuxLeafStore,
4558
                AuxSigner:              s.implCfg.AuxSigner,
4559
                MsgRouter:              s.implCfg.MsgRouter,
4560
                AuxChanCloser:          s.implCfg.AuxChanCloser,
4561
                AuxResolver:            s.implCfg.AuxContractResolver,
4562
                AuxTrafficShaper:       s.implCfg.TrafficShaper,
4563
                ShouldFwdExpEndorsement: func() bool {
3✔
4564
                        if s.cfg.ProtocolOptions.NoExperimentalEndorsement() {
6✔
4565
                                return false
3✔
4566
                        }
3✔
4567

4568
                        return clock.NewDefaultClock().Now().Before(
3✔
4569
                                EndorsementExperimentEnd,
3✔
4570
                        )
3✔
4571
                },
4572
                NoDisconnectOnPongFailure: s.cfg.NoDisconnectOnPongFailure,
4573
        }
4574

4575
        copy(pCfg.PubKeyBytes[:], peerAddr.IdentityKey.SerializeCompressed())
3✔
4576
        copy(pCfg.ServerPubKey[:], s.identityECDH.PubKey().SerializeCompressed())
3✔
4577

3✔
4578
        p := peer.NewBrontide(pCfg)
3✔
4579

3✔
4580
        // Update the access manager with the access permission for this peer.
3✔
4581
        s.peerAccessMan.addPeerAccess(pubKey, access, inbound)
3✔
4582

3✔
4583
        // TODO(roasbeef): update IP address for link-node
3✔
4584
        //  * also mark last-seen, do it one single transaction?
3✔
4585

3✔
4586
        s.addPeer(p)
3✔
4587

3✔
4588
        // Once we have successfully added the peer to the server, we can
3✔
4589
        // delete the previous error buffer from the server's map of error
3✔
4590
        // buffers.
3✔
4591
        delete(s.peerErrors, pkStr)
3✔
4592

3✔
4593
        // Dispatch a goroutine to asynchronously start the peer. This process
3✔
4594
        // includes sending and receiving Init messages, which would be a DOS
3✔
4595
        // vector if we held the server's mutex throughout the procedure.
3✔
4596
        s.wg.Add(1)
3✔
4597
        go s.peerInitializer(p)
3✔
4598
}
4599

4600
// addPeer adds the passed peer to the server's global state of all active
4601
// peers.
4602
func (s *server) addPeer(p *peer.Brontide) {
3✔
4603
        if p == nil {
3✔
4604
                return
×
4605
        }
×
4606

4607
        pubBytes := p.IdentityKey().SerializeCompressed()
3✔
4608

3✔
4609
        // Ignore new peers if we're shutting down.
3✔
4610
        if s.Stopped() {
3✔
4611
                srvrLog.Infof("Server stopped, skipped adding peer=%x",
×
4612
                        pubBytes)
×
4613
                p.Disconnect(ErrServerShuttingDown)
×
4614

×
4615
                return
×
4616
        }
×
4617

4618
        // Track the new peer in our indexes so we can quickly look it up either
4619
        // according to its public key, or its peer ID.
4620
        // TODO(roasbeef): pipe all requests through to the
4621
        // queryHandler/peerManager
4622

4623
        // NOTE: This pubStr is a raw bytes to string conversion and will NOT
4624
        // be human-readable.
4625
        pubStr := string(pubBytes)
3✔
4626

3✔
4627
        s.peersByPub[pubStr] = p
3✔
4628

3✔
4629
        if p.Inbound() {
6✔
4630
                s.inboundPeers[pubStr] = p
3✔
4631
        } else {
6✔
4632
                s.outboundPeers[pubStr] = p
3✔
4633
        }
3✔
4634

4635
        // Inform the peer notifier of a peer online event so that it can be reported
4636
        // to clients listening for peer events.
4637
        var pubKey [33]byte
3✔
4638
        copy(pubKey[:], pubBytes)
3✔
4639
}
4640

4641
// peerInitializer asynchronously starts a newly connected peer after it has
4642
// been added to the server's peer map. This method sets up a
4643
// peerTerminationWatcher for the given peer, and ensures that it executes even
4644
// if the peer failed to start. In the event of a successful connection, this
4645
// method reads the negotiated, local feature-bits and spawns the appropriate
4646
// graph synchronization method. Any registered clients of NotifyWhenOnline will
4647
// be signaled of the new peer once the method returns.
4648
//
4649
// NOTE: This MUST be launched as a goroutine.
4650
func (s *server) peerInitializer(p *peer.Brontide) {
3✔
4651
        defer s.wg.Done()
3✔
4652

3✔
4653
        pubBytes := p.IdentityKey().SerializeCompressed()
3✔
4654

3✔
4655
        // Avoid initializing peers while the server is exiting.
3✔
4656
        if s.Stopped() {
3✔
4657
                srvrLog.Infof("Server stopped, skipped initializing peer=%x",
×
4658
                        pubBytes)
×
4659
                return
×
4660
        }
×
4661

4662
        // Create a channel that will be used to signal a successful start of
4663
        // the link. This prevents the peer termination watcher from beginning
4664
        // its duty too early.
4665
        ready := make(chan struct{})
3✔
4666

3✔
4667
        // Before starting the peer, launch a goroutine to watch for the
3✔
4668
        // unexpected termination of this peer, which will ensure all resources
3✔
4669
        // are properly cleaned up, and re-establish persistent connections when
3✔
4670
        // necessary. The peer termination watcher will be short circuited if
3✔
4671
        // the peer is ever added to the ignorePeerTermination map, indicating
3✔
4672
        // that the server has already handled the removal of this peer.
3✔
4673
        s.wg.Add(1)
3✔
4674
        go s.peerTerminationWatcher(p, ready)
3✔
4675

3✔
4676
        // Start the peer! If an error occurs, we Disconnect the peer, which
3✔
4677
        // will unblock the peerTerminationWatcher.
3✔
4678
        if err := p.Start(); err != nil {
6✔
4679
                srvrLog.Warnf("Starting peer=%x got error: %v", pubBytes, err)
3✔
4680

3✔
4681
                p.Disconnect(fmt.Errorf("unable to start peer: %w", err))
3✔
4682
                return
3✔
4683
        }
3✔
4684

4685
        // Otherwise, signal to the peerTerminationWatcher that the peer startup
4686
        // was successful, and to begin watching the peer's wait group.
4687
        close(ready)
3✔
4688

3✔
4689
        s.mu.Lock()
3✔
4690
        defer s.mu.Unlock()
3✔
4691

3✔
4692
        // Check if there are listeners waiting for this peer to come online.
3✔
4693
        srvrLog.Debugf("Notifying that peer %v is online", p)
3✔
4694

3✔
4695
        // TODO(guggero): Do a proper conversion to a string everywhere, or use
3✔
4696
        // route.Vertex as the key type of peerConnectedListeners.
3✔
4697
        pubStr := string(pubBytes)
3✔
4698
        for _, peerChan := range s.peerConnectedListeners[pubStr] {
6✔
4699
                select {
3✔
4700
                case peerChan <- p:
3✔
4701
                case <-s.quit:
×
4702
                        return
×
4703
                }
4704
        }
4705
        delete(s.peerConnectedListeners, pubStr)
3✔
4706

3✔
4707
        // Since the peer has been fully initialized, now it's time to notify
3✔
4708
        // the RPC about the peer online event.
3✔
4709
        s.peerNotifier.NotifyPeerOnline([33]byte(pubBytes))
3✔
4710
}
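
// The ready-channel handshake between peerInitializer and
// peerTerminationWatcher is a small, reusable pattern: the watcher is
// launched before the start attempt so a failed start still has a cleanup
// path, and it only begins its real work once `ready` is closed. A minimal,
// self-contained sketch of the same idea (the function and callback names
// here are illustrative, not part of lnd's API):
func startWithWatcherSketch(start func() error,
	watch func(ready chan struct{}), onFail func(error)) {

	ready := make(chan struct{})

	// Launch the watcher first so that even a failed start is observed
	// and cleaned up.
	go watch(ready)

	if err := start(); err != nil {
		onFail(err)
		return
	}

	// Signal the watcher that startup succeeded and it may begin
	// monitoring in earnest.
	close(ready)
}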

// peerTerminationWatcher waits until a peer has been disconnected
// unexpectedly, and then cleans up all resources allocated to the peer,
// notifies relevant sub-systems of its demise, and finally handles
// re-connecting to the peer if it's persistent. If the server intentionally
// disconnects a peer, it should have a corresponding entry in the
// ignorePeerTermination map which will cause the cleanup routine to exit
// early. The passed `ready` chan is used to synchronize when
// WaitForDisconnect should begin watching on the peer's waitgroup. The ready
// chan should only be signaled if the peer starts successfully, otherwise the
// peer should be disconnected instead.
//
// NOTE: This MUST be launched as a goroutine.
func (s *server) peerTerminationWatcher(p *peer.Brontide, ready chan struct{}) {
	defer s.wg.Done()

	ctx := btclog.WithCtx(
		context.TODO(), lnutils.LogPubKey("peer", p.IdentityKey()),
	)

	p.WaitForDisconnect(ready)

	srvrLog.DebugS(ctx, "Peer has been disconnected")

	// If the server is exiting then we can bail out early ourselves as all
	// the other sub-systems will already be shutting down.
	if s.Stopped() {
		srvrLog.DebugS(ctx, "Server quitting, exit early for peer")
		return
	}

	// Next, we'll cancel all pending funding reservations with this node.
	// If we tried to initiate any funding flows that haven't yet finished,
	// then we need to unlock those committed outputs so they're still
	// available for use.
	s.fundingMgr.CancelPeerReservations(p.PubKey())

	pubKey := p.IdentityKey()

	// We'll also inform the gossiper that this peer is no longer active,
	// so we don't need to maintain sync state for it any longer.
	s.authGossiper.PruneSyncState(p.PubKey())

	// Tell the switch to remove all links associated with this peer.
	// Passing nil as the target link indicates that all links associated
	// with this interface should be closed.
	//
	// TODO(roasbeef): instead add a PurgeInterfaceLinks function?
	links, err := s.htlcSwitch.GetLinksByInterface(p.PubKey())
	if err != nil && err != htlcswitch.ErrNoLinksFound {
		srvrLog.Errorf("Unable to get channel links for %v: %v", p, err)
	}

	for _, link := range links {
		s.htlcSwitch.RemoveLink(link.ChanID())
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	// If there were any notification requests for when this peer
	// disconnected, we can trigger them now.
	srvrLog.DebugS(ctx, "Notifying that peer is offline")
	pubStr := string(pubKey.SerializeCompressed())
	for _, offlineChan := range s.peerDisconnectedListeners[pubStr] {
		close(offlineChan)
	}
	delete(s.peerDisconnectedListeners, pubStr)

	// If the server has already removed this peer, we can short circuit
	// the peer termination watcher and skip cleanup.
	if _, ok := s.ignorePeerTermination[p]; ok {
		delete(s.ignorePeerTermination, p)

		pubKey := p.PubKey()
		pubStr := string(pubKey[:])

		// If a connection callback is present, we'll go ahead and
		// execute it now that the previous peer has fully
		// disconnected. If the callback is not present, this likely
		// implies the peer was purposefully disconnected via RPC, and
		// that no reconnect should be attempted.
		connCallback, ok := s.scheduledPeerConnection[pubStr]
		if ok {
			delete(s.scheduledPeerConnection, pubStr)
			connCallback()
		}
		return
	}

	// First, cleanup any remaining state the server has regarding the peer
	// in question.
	s.removePeerUnsafe(ctx, p)

	// Next, check to see if this is a persistent peer or not.
	if _, ok := s.persistentPeers[pubStr]; !ok {
		return
	}

	// Get the last address that we used to connect to the peer.
	addrs := []net.Addr{
		p.NetAddress().Address,
	}

	// We'll ensure that we locate all the peer's advertised addresses for
	// reconnection purposes.
	advertisedAddrs, err := s.fetchNodeAdvertisedAddrs(ctx, pubKey)
	switch {
	// We found advertised addresses, so use them.
	case err == nil:
		addrs = advertisedAddrs

	// The peer doesn't have an advertised address.
	case err == errNoAdvertisedAddr:
		// If it is an outbound peer then we fall back to the existing
		// peer address.
		if !p.Inbound() {
			break
		}

		// Fall back to the existing peer address if we're not
		// accepting connections over Tor.
		if s.torController == nil {
			break
		}

		// If we are, the peer's address won't be known to us (we'll
		// see a private address, which is the address used by our
		// onion service to dial to lnd), so we don't have enough
		// information to attempt a reconnect.
		srvrLog.DebugS(ctx, "Ignoring reconnection attempt "+
			"to inbound peer without advertised address")
		return

	// We came across an error retrieving an advertised address; log it,
	// and fall back to the existing peer address.
	default:
		srvrLog.ErrorS(ctx, "Unable to retrieve advertised "+
			"address for peer", err)
	}

	// Make an easy lookup map so that we can check if an address
	// is already in the address list that we have stored for this peer.
	existingAddrs := make(map[string]bool)
	for _, addr := range s.persistentPeerAddrs[pubStr] {
		existingAddrs[addr.String()] = true
	}

	// Add any missing addresses for this peer to persistentPeerAddrs.
	for _, addr := range addrs {
		if existingAddrs[addr.String()] {
			continue
		}

		s.persistentPeerAddrs[pubStr] = append(
			s.persistentPeerAddrs[pubStr],
			&lnwire.NetAddress{
				IdentityKey: p.IdentityKey(),
				Address:     addr,
				ChainNet:    p.NetAddress().ChainNet,
			},
		)
	}

	// Record the computed backoff in the backoff map.
	backoff := s.nextPeerBackoff(pubStr, p.StartTime())
	s.persistentPeersBackoff[pubStr] = backoff

	// Initialize a retry canceller for this peer if one does not exist.
	cancelChan, ok := s.persistentRetryCancels[pubStr]
	if !ok {
		cancelChan = make(chan struct{})
		s.persistentRetryCancels[pubStr] = cancelChan
	}

	// We choose not to add this goroutine to the wait group since the
	// Connect call can stall for arbitrarily long if we shutdown while an
	// outbound connection attempt is being made.
	go func() {
		srvrLog.DebugS(ctx, "Scheduling connection "+
			"re-establishment to persistent peer",
			"reconnecting_in", backoff)

		select {
		case <-time.After(backoff):
		case <-cancelChan:
			return
		case <-s.quit:
			return
		}

		srvrLog.DebugS(ctx, "Attempting to re-establish persistent "+
			"connection")

		s.connectToPersistentPeer(pubStr)
	}()
}

// connectToPersistentPeer uses all the stored addresses for a peer to attempt
// to connect to the peer. It creates connection requests if there are
// currently none for a given address and it removes old connection requests
// if the associated address is no longer in the latest address list for the
// peer.
func (s *server) connectToPersistentPeer(pubKeyStr string) {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Create an easy lookup map of the addresses we have stored for the
	// peer. We will remove entries from this map if we have existing
	// connection requests for the associated address and then any leftover
	// entries will indicate which addresses we should create new
	// connection requests for.
	addrMap := make(map[string]*lnwire.NetAddress)
	for _, addr := range s.persistentPeerAddrs[pubKeyStr] {
		addrMap[addr.String()] = addr
	}

	// Go through each of the existing connection requests and check if
	// they correspond to the latest set of addresses. If there is a
	// connection request that does not use one of the latest advertised
	// addresses then remove that connection request.
	var updatedConnReqs []*connmgr.ConnReq
	for _, connReq := range s.persistentConnReqs[pubKeyStr] {
		lnAddr := connReq.Addr.(*lnwire.NetAddress).Address.String()

		switch _, ok := addrMap[lnAddr]; ok {
		// If the existing connection request is using one of the
		// latest advertised addresses for the peer then we add it to
		// updatedConnReqs and remove the associated address from
		// addrMap so that we don't recreate this connReq later on.
		case true:
			updatedConnReqs = append(
				updatedConnReqs, connReq,
			)
			delete(addrMap, lnAddr)

		// If the existing connection request is using an address that
		// is not one of the latest advertised addresses for the peer
		// then we remove the connection request from the connection
		// manager.
		case false:
			srvrLog.Info(
				"Removing conn req:", connReq.Addr.String(),
			)
			s.connMgr.Remove(connReq.ID())
		}
	}

	s.persistentConnReqs[pubKeyStr] = updatedConnReqs

	cancelChan, ok := s.persistentRetryCancels[pubKeyStr]
	if !ok {
		cancelChan = make(chan struct{})
		s.persistentRetryCancels[pubKeyStr] = cancelChan
	}

	// Any addresses left in addrMap are new ones that we have not made
	// connection requests for. So create new connection requests for
	// those. If there is more than one address in the address map, stagger
	// the creation of the connection requests for those.
	go func() {
		ticker := time.NewTicker(multiAddrConnectionStagger)
		defer ticker.Stop()

		for _, addr := range addrMap {
			// Send the persistent connection request to the
			// connection manager, saving the request itself so we
			// can cancel/restart the process as needed.
			connReq := &connmgr.ConnReq{
				Addr:      addr,
				Permanent: true,
			}

			s.mu.Lock()
			s.persistentConnReqs[pubKeyStr] = append(
				s.persistentConnReqs[pubKeyStr], connReq,
			)
			s.mu.Unlock()

			srvrLog.Debugf("Attempting persistent connection to "+
				"channel peer %v", addr)

			go s.connMgr.Connect(connReq)

			select {
			case <-s.quit:
				return
			case <-cancelChan:
				return
			case <-ticker.C:
			}
		}
	}()
}
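
// The staggered dial loop above generalizes to any multi-address dial: fire
// each attempt asynchronously, but pace attempts with a ticker so we don't
// race many TCP/Noise handshakes to the same node at once, and bail out
// promptly on shutdown. A minimal, self-contained sketch of the same
// select-driven pacing (the names here are illustrative, not part of lnd's
// API):
func staggeredDialSketch(addrs []string, interval time.Duration,
	dial func(string), quit <-chan struct{}) {

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for _, addr := range addrs {
		// Fire the attempt asynchronously, mirroring how the
		// connection manager's Connect is invoked above.
		go dial(addr)

		// Wait one interval between attempts, unless we're told to
		// quit first.
		select {
		case <-quit:
			return
		case <-ticker.C:
		}
	}
}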
5007

5008
// removePeerUnsafe removes the passed peer from the server's state of all
5009
// active peers.
5010
//
5011
// NOTE: Server mutex must be held when calling this function.
5012
func (s *server) removePeerUnsafe(ctx context.Context, p *peer.Brontide) {
3✔
5013
        if p == nil {
3✔
5014
                return
×
5015
        }
×
5016

5017
        srvrLog.DebugS(ctx, "Removing peer")
3✔
5018

3✔
5019
        // Exit early if we have already been instructed to shutdown, the peers
3✔
5020
        // will be disconnected in the server shutdown process.
3✔
5021
        if s.Stopped() {
3✔
5022
                return
×
5023
        }
×
5024

5025
        // Capture the peer's public key and string representation.
5026
        pKey := p.PubKey()
3✔
5027
        pubSer := pKey[:]
3✔
5028
        pubStr := string(pubSer)
3✔
5029

3✔
5030
        delete(s.peersByPub, pubStr)
3✔
5031

3✔
5032
        if p.Inbound() {
6✔
5033
                delete(s.inboundPeers, pubStr)
3✔
5034
        } else {
6✔
5035
                delete(s.outboundPeers, pubStr)
3✔
5036
        }
3✔
5037

5038
        // When removing the peer we make sure to disconnect it asynchronously
5039
        // to avoid blocking the main server goroutine because it is holding the
5040
        // server's mutex. Disconnecting the peer might block and wait until the
5041
        // peer has fully started up. This can happen if an inbound and outbound
5042
        // race condition occurs.
5043
        s.wg.Add(1)
3✔
5044
        go func() {
6✔
5045
                defer s.wg.Done()
3✔
5046

3✔
5047
                p.Disconnect(fmt.Errorf("server: disconnecting peer %v", p))
3✔
5048

3✔
5049
                // If this peer had an active persistent connection request,
3✔
5050
                // remove it.
3✔
5051
                if p.ConnReq() != nil {
6✔
5052
                        s.connMgr.Remove(p.ConnReq().ID())
3✔
5053
                }
3✔
5054

5055
                // Remove the peer's access permission from the access manager.
5056
                peerPubStr := string(p.IdentityKey().SerializeCompressed())
3✔
5057
                s.peerAccessMan.removePeerAccess(ctx, peerPubStr)
3✔
5058

3✔
5059
                // Copy the peer's error buffer across to the server if it has
3✔
5060
                // any items in it so that we can restore peer errors across
3✔
5061
                // connections. We need to look up the error after the peer has
3✔
5062
                // been disconnected because we write the error in the
3✔
5063
                // `Disconnect` method.
3✔
5064
                s.mu.Lock()
3✔
5065
                if p.ErrorBuffer().Total() > 0 {
6✔
5066
                        s.peerErrors[pubStr] = p.ErrorBuffer()
3✔
5067
                }
3✔
5068
                s.mu.Unlock()
3✔
5069

3✔
5070
                // Inform the peer notifier of a peer offline event so that it
3✔
5071
                // can be reported to clients listening for peer events.
3✔
5072
                var pubKey [33]byte
3✔
5073
                copy(pubKey[:], pubSer)
3✔
5074

3✔
5075
                s.peerNotifier.NotifyPeerOffline(pubKey)
3✔
5076
        }()
5077
}
5078

5079
// ConnectToPeer requests that the server connect to a Lightning Network peer
5080
// at the specified address. This function will *block* until either a
5081
// connection is established, or the initial handshake process fails.
5082
//
5083
// NOTE: This function is safe for concurrent access.
5084
func (s *server) ConnectToPeer(addr *lnwire.NetAddress,
5085
        perm bool, timeout time.Duration) error {
3✔
5086

3✔
5087
        targetPub := string(addr.IdentityKey.SerializeCompressed())
3✔
5088

3✔
5089
        // Acquire mutex, but use explicit unlocking instead of defer for
3✔
5090
        // better granularity.  In certain conditions, this method requires
3✔
5091
        // making an outbound connection to a remote peer, which requires the
3✔
5092
        // lock to be released, and subsequently reacquired.
3✔
5093
        s.mu.Lock()
3✔
5094

3✔
5095
        // Ensure we're not already connected to this peer.
3✔
5096
        peer, err := s.findPeerByPubStr(targetPub)
3✔
5097

3✔
5098
        // When there's no error it means we already have a connection with this
3✔
5099
        // peer. If this is a dev environment with the `--unsafeconnect` flag
3✔
5100
        // set, we will ignore the existing connection and continue.
3✔
5101
        if err == nil && !s.cfg.Dev.GetUnsafeConnect() {
6✔
5102
                s.mu.Unlock()
3✔
5103
                return &errPeerAlreadyConnected{peer: peer}
3✔
5104
        }
3✔
5105

5106
        // Peer was not found, continue to pursue connection with peer.
5107

5108
        // If there's already a pending connection request for this pubkey,
5109
        // then we ignore this request to ensure we don't create a redundant
5110
        // connection.
5111
        if reqs, ok := s.persistentConnReqs[targetPub]; ok {
6✔
5112
                srvrLog.Warnf("Already have %d persistent connection "+
3✔
5113
                        "requests for %v, connecting anyway.", len(reqs), addr)
3✔
5114
        }
3✔
5115

5116
        // If there's not already a pending or active connection to this node,
5117
        // then instruct the connection manager to attempt to establish a
5118
        // persistent connection to the peer.
5119
        srvrLog.Debugf("Connecting to %v", addr)
3✔
5120
        if perm {
6✔
5121
                connReq := &connmgr.ConnReq{
3✔
5122
                        Addr:      addr,
3✔
5123
                        Permanent: true,
3✔
5124
                }
3✔
5125

3✔
5126
                // Since the user requested a permanent connection, we'll set
3✔
5127
                // the entry to true which will tell the server to continue
3✔
5128
                // reconnecting even if the number of channels with this peer is
3✔
5129
                // zero.
3✔
5130
                s.persistentPeers[targetPub] = true
3✔
5131
                if _, ok := s.persistentPeersBackoff[targetPub]; !ok {
6✔
5132
                        s.persistentPeersBackoff[targetPub] = s.cfg.MinBackoff
3✔
5133
                }
3✔
5134
                s.persistentConnReqs[targetPub] = append(
3✔
5135
                        s.persistentConnReqs[targetPub], connReq,
3✔
5136
                )
3✔
5137
                s.mu.Unlock()
3✔
5138

3✔
5139
                go s.connMgr.Connect(connReq)
3✔
5140

3✔
5141
                return nil
3✔
5142
        }
5143
        s.mu.Unlock()
3✔
5144

3✔
5145
        // If we're not making a persistent connection, then we'll attempt to
3✔
5146
        // connect to the target peer. If the we can't make the connection, or
3✔
5147
        // the crypto negotiation breaks down, then return an error to the
3✔
5148
        // caller.
3✔
5149
        errChan := make(chan error, 1)
3✔
5150
        s.connectToPeer(addr, errChan, timeout)
3✔
5151

3✔
5152
        select {
3✔
5153
        case err := <-errChan:
3✔
5154
                return err
3✔
5155
        case <-s.quit:
×
5156
                return ErrServerShuttingDown
×
5157
        }
5158
}
5159

5160
// connectToPeer establishes a connection to a remote peer. errChan is used to
5161
// notify the caller if the connection attempt has failed. Otherwise, it will be
5162
// closed.
5163
func (s *server) connectToPeer(addr *lnwire.NetAddress,
5164
        errChan chan<- error, timeout time.Duration) {
3✔
5165

3✔
5166
        conn, err := brontide.Dial(
3✔
5167
                s.identityECDH, addr, timeout, s.cfg.net.Dial,
3✔
5168
        )
3✔
5169
        if err != nil {
6✔
5170
                srvrLog.Errorf("Unable to connect to %v: %v", addr, err)
3✔
5171
                select {
3✔
5172
                case errChan <- err:
3✔
5173
                case <-s.quit:
×
5174
                }
5175
                return
3✔
5176
        }
5177

5178
        close(errChan)
3✔
5179

3✔
5180
        srvrLog.Tracef("Brontide dialer made local=%v, remote=%v",
3✔
5181
                conn.LocalAddr(), conn.RemoteAddr())
3✔
5182

3✔
5183
        s.OutboundPeerConnected(nil, conn)
3✔
5184
}
5185

5186
// DisconnectPeer sends the request to server to close the connection with peer
5187
// identified by public key.
5188
//
5189
// NOTE: This function is safe for concurrent access.
5190
func (s *server) DisconnectPeer(pubKey *btcec.PublicKey) error {
3✔
5191
        pubBytes := pubKey.SerializeCompressed()
3✔
5192
        pubStr := string(pubBytes)
3✔
5193

3✔
5194
        s.mu.Lock()
3✔
5195
        defer s.mu.Unlock()
3✔
5196

3✔
5197
        // Check that were actually connected to this peer. If not, then we'll
3✔
5198
        // exit in an error as we can't disconnect from a peer that we're not
3✔
5199
        // currently connected to.
3✔
5200
        peer, err := s.findPeerByPubStr(pubStr)
3✔
5201
        if err == ErrPeerNotConnected {
6✔
5202
                return fmt.Errorf("peer %x is not connected", pubBytes)
3✔
5203
        }
3✔
5204

5205
        srvrLog.Infof("Disconnecting from %v", peer)
3✔
5206

3✔
5207
        s.cancelConnReqs(pubStr, nil)
3✔
5208

3✔
5209
        // If this peer was formerly a persistent connection, then we'll remove
3✔
5210
        // them from this map so we don't attempt to re-connect after we
3✔
5211
        // disconnect.
3✔
5212
        delete(s.persistentPeers, pubStr)
3✔
5213
        delete(s.persistentPeersBackoff, pubStr)
3✔
5214

3✔
5215
        // Remove the peer by calling Disconnect. Previously this was done with
3✔
5216
        // removePeerUnsafe, which bypassed the peerTerminationWatcher.
3✔
5217
        //
3✔
5218
        // NOTE: We call it in a goroutine to avoid blocking the main server
3✔
5219
        // goroutine because we might hold the server's mutex.
3✔
5220
        go peer.Disconnect(fmt.Errorf("server: DisconnectPeer called"))
3✔
5221

3✔
5222
        return nil
3✔
5223
}
5224

5225
// OpenChannel sends a request to the server to open a channel to the specified
5226
// peer identified by nodeKey with the passed channel funding parameters.
5227
//
5228
// NOTE: This function is safe for concurrent access.
5229
func (s *server) OpenChannel(
5230
        req *funding.InitFundingMsg) (chan *lnrpc.OpenStatusUpdate, chan error) {
3✔
5231

3✔
5232
        // The updateChan will have a buffer of 2, since we expect a ChanPending
3✔
5233
        // + a ChanOpen update, and we want to make sure the funding process is
3✔
5234
        // not blocked if the caller is not reading the updates.
3✔
5235
        req.Updates = make(chan *lnrpc.OpenStatusUpdate, 2)
3✔
5236
        req.Err = make(chan error, 1)
3✔
5237

3✔
5238
        // First attempt to locate the target peer to open a channel with, if
3✔
5239
        // we're unable to locate the peer then this request will fail.
3✔
5240
        pubKeyBytes := req.TargetPubkey.SerializeCompressed()
3✔
5241
        s.mu.RLock()
3✔
5242
        peer, ok := s.peersByPub[string(pubKeyBytes)]
3✔
5243
        if !ok {
3✔
5244
                s.mu.RUnlock()
×
5245

×
5246
                req.Err <- fmt.Errorf("peer %x is not online", pubKeyBytes)
×
5247
                return req.Updates, req.Err
×
5248
        }
×
5249
        req.Peer = peer
3✔
5250
        s.mu.RUnlock()
3✔
5251

3✔
5252
        // We'll wait until the peer is active before beginning the channel
3✔
5253
        // opening process.
3✔
5254
        select {
3✔
5255
        case <-peer.ActiveSignal():
3✔
5256
        case <-peer.QuitSignal():
×
5257
                req.Err <- fmt.Errorf("peer %x disconnected", pubKeyBytes)
×
5258
                return req.Updates, req.Err
×
5259
        case <-s.quit:
×
5260
                req.Err <- ErrServerShuttingDown
×
5261
                return req.Updates, req.Err
×
5262
        }
5263

5264
        // If the fee rate wasn't specified at this point we fail the funding
5265
        // because of the missing fee rate information. The caller of the
5266
        // `OpenChannel` method needs to make sure that default values for the
5267
        // fee rate are set beforehand.
5268
        if req.FundingFeePerKw == 0 {
3✔
5269
                req.Err <- fmt.Errorf("no FundingFeePerKw specified for " +
×
5270
                        "the channel opening transaction")
×
5271

×
5272
                return req.Updates, req.Err
×
5273
        }
×
5274

5275
        // Spawn a goroutine to send the funding workflow request to the funding
5276
        // manager. This allows the server to continue handling queries instead
5277
        // of blocking on this request which is exported as a synchronous
5278
        // request to the outside world.
5279
        go s.fundingMgr.InitFundingWorkflow(req)
3✔
5280

3✔
5281
        return req.Updates, req.Err
3✔
5282
}
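
// Callers consume OpenChannel's two result channels with a select loop: the
// updates channel yields the ChanPending and then the ChanOpen notification,
// while the error channel reports a single failure. A hedged consumer sketch
// (the function and the handleUpdate callback are illustrative, not part of
// lnd's API):
func awaitOpenResultSketch(updates chan *lnrpc.OpenStatusUpdate,
	errChan chan error, quit <-chan struct{},
	handleUpdate func(*lnrpc.OpenStatusUpdate)) error {

	for {
		select {
		// Forward each status update to the caller's handler.
		case upd := <-updates:
			handleUpdate(upd)

			// ChanOpen is the terminal update of a successful
			// funding flow, so we can stop waiting.
			if upd.GetChanOpen() != nil {
				return nil
			}

		// A single error terminates the funding flow.
		case err := <-errChan:
			return err

		case <-quit:
			return ErrServerShuttingDown
		}
	}
}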

// Peers returns a slice of all active peers.
//
// NOTE: This function is safe for concurrent access.
func (s *server) Peers() []*peer.Brontide {
	s.mu.RLock()
	defer s.mu.RUnlock()

	peers := make([]*peer.Brontide, 0, len(s.peersByPub))
	for _, peer := range s.peersByPub {
		peers = append(peers, peer)
	}

	return peers
}

// computeNextBackoff uses a truncated exponential backoff to compute the next
// backoff from the value of the existing backoff. The returned duration is
// randomized in either direction by up to 1/20 to prevent tight loops from
// stabilizing.
func computeNextBackoff(currBackoff, maxBackoff time.Duration) time.Duration {
	// Double the current backoff, truncating if it exceeds our maximum.
	nextBackoff := 2 * currBackoff
	if nextBackoff > maxBackoff {
		nextBackoff = maxBackoff
	}

	// Using 1/10 of our duration as a margin, compute a random offset to
	// avoid the nodes entering connection cycles.
	margin := nextBackoff / 10

	var wiggle big.Int
	wiggle.SetUint64(uint64(margin))
	if _, err := rand.Int(rand.Reader, &wiggle); err != nil {
		// Randomizing is not mission critical, so we'll just return
		// the current backoff.
		return nextBackoff
	}

	// Otherwise add in our wiggle, but subtract out half of the margin so
	// that the backoff can be tweaked by up to 1/20 in either direction.
	return nextBackoff + (time.Duration(wiggle.Uint64()) - margin/2)
}
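
// As a worked example of the jitter math above: with a doubled backoff of
// 10s the margin is 1s, the random wiggle lands in [0s, 1s), and subtracting
// margin/2 re-centers it, so the result falls roughly in [9.5s, 10.5s). A
// minimal sketch that prints such a capped, jittered schedule (the function
// is illustrative only, not part of lnd's API):
func printBackoffScheduleSketch(start, max time.Duration, attempts int) {
	backoff := start
	for i := 0; i < attempts; i++ {
		fmt.Printf("attempt %d: wait %v\n", i+1, backoff)

		// Each step doubles, truncates at max, and jitters by ±1/20.
		backoff = computeNextBackoff(backoff, max)
	}
}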

// errNoAdvertisedAddr is an error returned when we attempt to retrieve the
// advertised address of a node, but they don't have one.
var errNoAdvertisedAddr = errors.New("no advertised address found")

// fetchNodeAdvertisedAddrs attempts to fetch the advertised addresses of a
// node.
func (s *server) fetchNodeAdvertisedAddrs(ctx context.Context,
	pub *btcec.PublicKey) ([]net.Addr, error) {

	vertex, err := route.NewVertexFromBytes(pub.SerializeCompressed())
	if err != nil {
		return nil, err
	}

	node, err := s.graphDB.FetchLightningNode(ctx, vertex)
	if err != nil {
		return nil, err
	}

	if len(node.Addresses) == 0 {
		return nil, errNoAdvertisedAddr
	}

	return node.Addresses, nil
}

// fetchLastChanUpdate returns a function which is able to retrieve our latest
// channel update for a target channel.
func (s *server) fetchLastChanUpdate() func(lnwire.ShortChannelID) (
	*lnwire.ChannelUpdate1, error) {

	ourPubKey := s.identityECDH.PubKey().SerializeCompressed()
	return func(cid lnwire.ShortChannelID) (*lnwire.ChannelUpdate1, error) {
		info, edge1, edge2, err := s.graphBuilder.GetChannelByID(cid)
		if err != nil {
			return nil, err
		}

		return netann.ExtractChannelUpdate(
			ourPubKey[:], info, edge1, edge2,
		)
	}
}
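
// fetchLastChanUpdate deliberately serializes our pubkey once and captures it
// in the returned closure, so each call only pays for the graph lookup. A
// hedged usage sketch (the function name and SCID value are illustrative, not
// part of lnd's API):
func lastChanUpdateUsageSketch(s *server) {
	fetch := s.fetchLastChanUpdate()

	// Placeholder SCID; a real caller would use the SCID of one of its
	// own channels.
	scid := lnwire.NewShortChanIDFromInt(1 << 40)

	upd, err := fetch(scid)
	if err != nil {
		srvrLog.Errorf("no update for %v: %v", scid, err)
		return
	}

	srvrLog.Debugf("latest update for %v: timestamp=%v", scid,
		upd.Timestamp)
}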

// applyChannelUpdate applies the channel update to the different sub-systems
// of the server. The useAlias boolean denotes whether or not to send an alias
// in place of the real SCID.
func (s *server) applyChannelUpdate(update *lnwire.ChannelUpdate1,
	op *wire.OutPoint, useAlias bool) error {

	var (
		peerAlias    *lnwire.ShortChannelID
		defaultAlias lnwire.ShortChannelID
	)

	chanID := lnwire.NewChanIDFromOutPoint(*op)

	// Fetch the peer's alias from the lnwire.ChannelID so it can be used
	// in the ChannelUpdate if it hasn't been announced yet.
	if useAlias {
		foundAlias, _ := s.aliasMgr.GetPeerAlias(chanID)
		if foundAlias != defaultAlias {
			peerAlias = &foundAlias
		}
	}

	errChan := s.authGossiper.ProcessLocalAnnouncement(
		update, discovery.RemoteAlias(peerAlias),
	)
	select {
	case err := <-errChan:
		return err
	case <-s.quit:
		return ErrServerShuttingDown
	}
}

// SendCustomMessage sends a custom message to the peer with the specified
// pubkey.
func (s *server) SendCustomMessage(peerPub [33]byte, msgType lnwire.MessageType,
	data []byte) error {

	peer, err := s.FindPeerByPubStr(string(peerPub[:]))
	if err != nil {
		return err
	}

	// We'll wait until the peer is active.
	select {
	case <-peer.ActiveSignal():
	case <-peer.QuitSignal():
		return fmt.Errorf("peer %x disconnected", peerPub)
	case <-s.quit:
		return ErrServerShuttingDown
	}

	msg, err := lnwire.NewCustom(msgType, data)
	if err != nil {
		return err
	}

	// Send the message as low-priority. For now we assume that all
	// application-defined messages are low priority.
	return peer.SendMessageLazy(true, msg)
}
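
// Custom message types are expected to live in the custom range (starting at
// lnwire.CustomTypeStart) unless explicitly overridden, otherwise
// lnwire.NewCustom will reject them. A hedged caller sketch (the function
// name and the "ping" payload are placeholders, not part of lnd's API):
func sendCustomMsgSketch(s *server, peerPub [33]byte) error {
	// Pick the first type in the custom range; real applications should
	// coordinate type numbers to avoid collisions with other software.
	msgType := lnwire.CustomTypeStart

	return s.SendCustomMessage(peerPub, msgType, []byte("ping"))
}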

// newSweepPkScriptGen creates a closure that generates a new public key
// script which should be used to sweep any funds into the on-chain wallet.
// Specifically, since the wallet is asked for a Taproot address, the script
// generated is a version 1, pay-to-taproot (p2tr) output.
func newSweepPkScriptGen(
	wallet lnwallet.WalletController,
	netParams *chaincfg.Params) func() fn.Result[lnwallet.AddrWithKey] {

	return func() fn.Result[lnwallet.AddrWithKey] {
		sweepAddr, err := wallet.NewAddress(
			lnwallet.TaprootPubkey, false,
			lnwallet.DefaultAccountName,
		)
		if err != nil {
			return fn.Err[lnwallet.AddrWithKey](err)
		}

		addr, err := txscript.PayToAddrScript(sweepAddr)
		if err != nil {
			return fn.Err[lnwallet.AddrWithKey](err)
		}

		internalKeyDesc, err := lnwallet.InternalKeyForAddr(
			wallet, netParams, addr,
		)
		if err != nil {
			return fn.Err[lnwallet.AddrWithKey](err)
		}

		return fn.Ok(lnwallet.AddrWithKey{
			DeliveryAddress: addr,
			InternalKey:     internalKeyDesc,
		})
	}
}
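
// The fn.Result return type bundles the address and the error into a single
// value. A hedged consumer sketch, assuming fn.Result exposes an
// Unpack() (T, error) method as in lnd's fn package (the function name is
// illustrative only):
func sweepScriptUsageSketch(gen func() fn.Result[lnwallet.AddrWithKey]) {
	addrWithKey, err := gen().Unpack()
	if err != nil {
		srvrLog.Errorf("unable to derive sweep script: %v", err)
		return
	}

	// DeliveryAddress is the raw pkScript the sweep outputs pay to.
	srvrLog.Debugf("sweeping to pkScript=%x",
		addrWithKey.DeliveryAddress)
}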

// fetchClosedChannelSCIDs returns a set of SCIDs whose force closes have
// finished.
func (s *server) fetchClosedChannelSCIDs() map[lnwire.ShortChannelID]struct{} {
	// Get a list of closed channels.
	channels, err := s.chanStateDB.FetchClosedChannels(false)
	if err != nil {
		srvrLog.Errorf("Failed to fetch closed channels: %v", err)
		return nil
	}

	// Save the SCIDs in a map.
	closedSCIDs := make(map[lnwire.ShortChannelID]struct{}, len(channels))
	for _, c := range channels {
		// If the channel is not pending, its FC has been finalized.
		if !c.IsPending {
			closedSCIDs[c.ShortChanID] = struct{}{}
		}
	}

	// Double check whether the reported closed channel has indeed
	// finished closing.
	//
	// NOTE: There are misalignments regarding when a channel's FC is
	// marked as finalized. We double check the pending channels to make
	// sure the returned SCIDs are indeed terminated.
	//
	// TODO(yy): fix the misalignments in `FetchClosedChannels`.
	pendings, err := s.chanStateDB.FetchPendingChannels()
	if err != nil {
		srvrLog.Errorf("Failed to fetch pending channels: %v", err)
		return nil
	}

	for _, c := range pendings {
		if _, ok := closedSCIDs[c.ShortChannelID]; !ok {
			continue
		}

		// If the channel is still reported as pending, remove it from
		// the map.
		delete(closedSCIDs, c.ShortChannelID)

		srvrLog.Warnf("Channel=%v is prematurely marked as finalized",
			c.ShortChannelID)
	}

	return closedSCIDs
}

// getStartingBeat returns the current beat. This is used during the startup
// to initialize blockbeat consumers.
func (s *server) getStartingBeat() (*chainio.Beat, error) {
	// beat is the current blockbeat.
	var beat *chainio.Beat

	// If the node is configured with nochainbackend mode (remote signer),
	// we will skip fetching the best block.
	if s.cfg.Bitcoin.Node == "nochainbackend" {
		srvrLog.Info("Skipping block notification for nochainbackend " +
			"mode")

		return &chainio.Beat{}, nil
	}

	// We should get a notification with the current best block immediately
	// by passing a nil block.
	blockEpochs, err := s.cc.ChainNotifier.RegisterBlockEpochNtfn(nil)
	if err != nil {
		return beat, fmt.Errorf("register block epoch ntfn: %w", err)
	}
	defer blockEpochs.Cancel()

	// We registered for the block epochs with a nil request. The notifier
	// should send us the current best block immediately. So we need to
	// wait for it here because we need to know the current best height.
	select {
	case bestBlock := <-blockEpochs.Epochs:
		srvrLog.Infof("Received initial block %v at height %d",
			bestBlock.Hash, bestBlock.Height)

		// Update the current blockbeat.
		beat = chainio.NewBeat(*bestBlock)

	case <-s.quit:
		srvrLog.Debug("LND shutting down")
	}

	return beat, nil
}

// ChanHasRbfCoopCloser returns true if the channel as identified by the
// channel point has an active RBF chan closer.
func (s *server) ChanHasRbfCoopCloser(peerPub *btcec.PublicKey,
	chanPoint wire.OutPoint) bool {

	pubBytes := peerPub.SerializeCompressed()

	s.mu.RLock()
	targetPeer, ok := s.peersByPub[string(pubBytes)]
	s.mu.RUnlock()
	if !ok {
		return false
	}

	return targetPeer.ChanHasRbfCoopCloser(chanPoint)
}

// attemptCoopRbfFeeBump attempts to look up the active chan closer for a
// channel given the outpoint. If found, we'll attempt to do a fee bump,
// returning channels used for updates. If the channel isn't currently active
// (p2p connection established), then this function will return an error.
func (s *server) attemptCoopRbfFeeBump(ctx context.Context,
	chanPoint wire.OutPoint, feeRate chainfee.SatPerKWeight,
	deliveryScript lnwire.DeliveryAddress) (*peer.CoopCloseUpdates, error) {

	// First, we'll attempt to look up the channel based on its
	// ChannelPoint.
	channel, err := s.chanStateDB.FetchChannel(chanPoint)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch channel: %w", err)
	}

	// From the channel, we can now get the pubkey of the peer, then use
	// that to eventually get the chan closer.
	peerPub := channel.IdentityPub.SerializeCompressed()

	// Now that we have the peer pub, we can look up the peer itself.
	s.mu.RLock()
	targetPeer, ok := s.peersByPub[string(peerPub)]
	s.mu.RUnlock()
	if !ok {
		return nil, fmt.Errorf("peer for ChannelPoint(%v) is "+
			"not online", chanPoint)
	}

	closeUpdates, err := targetPeer.TriggerCoopCloseRbfBump(
		ctx, chanPoint, feeRate, deliveryScript,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to trigger coop rbf fee bump: "+
			"%w", err)
	}

	return closeUpdates, nil
}

// AttemptRBFCloseUpdate attempts to trigger a new RBF iteration for a co-op
// close update. This route is to be used only if the target channel in
// question is no longer active in the link. This can happen when we restart
// while we have already done a single RBF co-op close iteration.
func (s *server) AttemptRBFCloseUpdate(ctx context.Context,
	chanPoint wire.OutPoint, feeRate chainfee.SatPerKWeight,
	deliveryScript lnwire.DeliveryAddress) (*peer.CoopCloseUpdates, error) {

	// If the channel is present in the switch, then the request should
	// flow through the switch instead.
	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
	if _, err := s.htlcSwitch.GetLink(chanID); err == nil {
		return nil, fmt.Errorf("ChannelPoint(%v) is active in link, "+
			"invalid request", chanPoint)
	}

	// At this point, we know that the channel isn't present in the link,
	// so we'll check to see if we have an entry in the active chan closer
	// map.
	updates, err := s.attemptCoopRbfFeeBump(
		ctx, chanPoint, feeRate, deliveryScript,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to attempt coop rbf fee bump "+
			"ChannelPoint(%v): %w", chanPoint, err)
	}

	return updates, nil
}
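
// A hedged sketch of driving an RBF co-op close fee bump from calling code:
// only fall through to AttemptRBFCloseUpdate when the link is inactive, as
// the method itself enforces. The function name, fee rate, and nil delivery
// script below are illustrative placeholders, not part of lnd's API:
func rbfBumpUsageSketch(ctx context.Context, s *server,
	chanPoint wire.OutPoint) error {

	// Each RBF iteration must pay a strictly higher fee rate than the
	// last to be a valid replacement; 500 sat/kw is just a placeholder.
	feeRate := chainfee.SatPerKWeight(500)

	// A nil delivery script is a placeholder; a real caller would supply
	// the address the close output should pay to.
	updates, err := s.AttemptRBFCloseUpdate(ctx, chanPoint, feeRate, nil)
	if err != nil {
		return err
	}

	// The returned CoopCloseUpdates carries the channels used to stream
	// close progress back to the caller.
	_ = updates

	return nil
}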