• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 11954082915

21 Nov 2024 01:20PM UTC coverage: 59.327% (+0.6%) from 58.776%
11954082915

Pull #8754

github

ViktorTigerstrom
itest: wrap deriveCustomScopeAccounts at 80 chars

This commit fixes the word wrapping for the deriveCustomScopeAccounts
function docs, and ensures that it wraps at 80 characters or less.
Pull Request #8754: Add `Outbound` Remote Signer implementation

1940 of 2984 new or added lines in 44 files covered. (65.01%)

226 existing lines in 37 files now uncovered.

135234 of 227947 relevant lines covered (59.33%)

19316.75 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

59.39
/lnd.go
1
// Copyright (c) 2013-2017 The btcsuite developers
2
// Copyright (c) 2015-2016 The Decred developers
3
// Copyright (C) 2015-2022 The Lightning Network Developers
4

5
package lnd
6

7
import (
8
        "context"
9
        "errors"
10
        "fmt"
11
        "net"
12
        "net/http"
13
        "net/http/pprof"
14
        "os"
15
        "runtime"
16
        runtimePprof "runtime/pprof"
17
        "strings"
18
        "sync"
19
        "time"
20

21
        "github.com/btcsuite/btcd/btcutil"
22
        proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
23
        "github.com/lightningnetwork/lnd/autopilot"
24
        "github.com/lightningnetwork/lnd/build"
25
        "github.com/lightningnetwork/lnd/chanacceptor"
26
        "github.com/lightningnetwork/lnd/channeldb"
27
        "github.com/lightningnetwork/lnd/cluster"
28
        "github.com/lightningnetwork/lnd/keychain"
29
        "github.com/lightningnetwork/lnd/lncfg"
30
        "github.com/lightningnetwork/lnd/lnrpc"
31
        "github.com/lightningnetwork/lnd/lnwallet"
32
        "github.com/lightningnetwork/lnd/lnwallet/rpcwallet"
33
        "github.com/lightningnetwork/lnd/macaroons"
34
        "github.com/lightningnetwork/lnd/monitoring"
35
        "github.com/lightningnetwork/lnd/rpcperms"
36
        "github.com/lightningnetwork/lnd/signal"
37
        "github.com/lightningnetwork/lnd/tor"
38
        "github.com/lightningnetwork/lnd/walletunlocker"
39
        "github.com/lightningnetwork/lnd/watchtower"
40
        "google.golang.org/grpc"
41
        "google.golang.org/grpc/credentials"
42
        "google.golang.org/grpc/keepalive"
43
        "gopkg.in/macaroon-bakery.v2/bakery"
44
        "gopkg.in/macaroon.v2"
45
)
46

47
const (
	// adminMacaroonFilePermissions is the file permission that is used for
	// creating the admin macaroon file. Octal 0640 means: owner may read
	// and write, group may read, others have no access.
	//
	// Why 640 is safe:
	// Assuming a reasonably secure Linux system, it will have a
	// separate group for each user. E.g. a new user lnd gets assigned group
	// lnd which nothing else belongs to. A system that does not do this is
	// inherently broken already.
	//
	// Since there is no other user in the group, no other user can read
	// admin macaroon unless the administrator explicitly allowed it. Thus
	// there's no harm allowing group read.
	adminMacaroonFilePermissions = 0640

	// leaderResignTimeout is the timeout used when resigning from the
	// leader role. This is kept short so LND can shut down quickly in case
	// of a system failure or network partition making the cluster
	// unresponsive. The cluster itself should ensure that the leader is not
	// elected again until the previous leader has resigned or the leader
	// election timeout has passed.
	leaderResignTimeout = 5 * time.Second
)
70

71
// AdminAuthOptions returns a list of DialOptions that can be used to
72
// authenticate with the RPC server with admin capabilities.
73
// skipMacaroons=true should be set if we don't want to include macaroons with
74
// the auth options. This is needed for instance for the WalletUnlocker
75
// service, which must be usable also before macaroons are created.
76
//
77
// NOTE: This should only be called after the RPCListener has signaled it is
78
// ready.
79
func AdminAuthOptions(cfg *Config, skipMacaroons bool) ([]grpc.DialOption,
80
        error) {
×
81

×
82
        creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "")
×
83
        if err != nil {
×
84
                return nil, fmt.Errorf("unable to read TLS cert: %w", err)
×
85
        }
×
86

87
        // Create a dial options array.
88
        opts := []grpc.DialOption{
×
89
                grpc.WithTransportCredentials(creds),
×
90
        }
×
91

×
92
        // Get the admin macaroon if macaroons are active.
×
93
        if !skipMacaroons && !cfg.NoMacaroons {
×
94
                // Load the admin macaroon file.
×
95
                macBytes, err := os.ReadFile(cfg.AdminMacPath)
×
96
                if err != nil {
×
97
                        return nil, fmt.Errorf("unable to read macaroon "+
×
98
                                "path (check the network setting!): %v", err)
×
99
                }
×
100

101
                mac := &macaroon.Macaroon{}
×
102
                if err = mac.UnmarshalBinary(macBytes); err != nil {
×
103
                        return nil, fmt.Errorf("unable to decode macaroon: %w",
×
104
                                err)
×
105
                }
×
106

107
                // Now we append the macaroon credentials to the dial options.
108
                cred, err := macaroons.NewMacaroonCredential(mac)
×
109
                if err != nil {
×
110
                        return nil, fmt.Errorf("error cloning mac: %w", err)
×
111
                }
×
112
                opts = append(opts, grpc.WithPerRPCCredentials(cred))
×
113
        }
114

115
        return opts, nil
×
116
}
117

118
// ListenerWithSignal is a net.Listener that has an additional Ready channel
119
// that will be closed when a server starts listening.
120
type ListenerWithSignal struct {
121
        net.Listener
122

123
        // Ready will be closed by the server listening on Listener.
124
        Ready chan struct{}
125

126
        // MacChan is an optional way to pass the admin macaroon to the program
127
        // that started lnd. The channel should be buffered to avoid lnd being
128
        // blocked on sending to the channel.
129
        MacChan chan []byte
130
}
131

132
// ListenerCfg is a wrapper around custom listeners that can be passed to lnd
// when calling its main method.
type ListenerCfg struct {
	// RPCListeners can be set to the listeners to use for the RPC server.
	// If empty, a regular network listener will be created from the
	// addresses configured in the main config instead.
	RPCListeners []*ListenerWithSignal
}
139

140
// errStreamIsolationWithProxySkip is returned when the configuration enables
// Tor stream isolation while also allowing clearnet targets to bypass the
// proxy; the two options are mutually exclusive.
var errStreamIsolationWithProxySkip = errors.New(
	"while stream isolation is enabled, the TOR proxy may not be skipped",
)
143

144
// Main is the true entry point for lnd. It accepts a fully populated and
145
// validated main configuration struct and an optional listener config struct.
146
// This function starts all main system components then blocks until a signal
147
// is received on the shutdownChan at which point everything is shut down again.
148
func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
149
        interceptor signal.Interceptor) error {
4✔
150

4✔
151
        defer func() {
8✔
152
                ltndLog.Info("Shutdown complete\n")
4✔
153
                err := cfg.LogRotator.Close()
4✔
154
                if err != nil {
4✔
155
                        ltndLog.Errorf("Could not close log rotator: %v", err)
×
156
                }
×
157
        }()
158

159
        mkErr := func(format string, args ...interface{}) error {
4✔
160
                ltndLog.Errorf("Shutting down because error in main "+
×
161
                        "method: "+format, args...)
×
162
                return fmt.Errorf(format, args...)
×
163
        }
×
164

165
        // Show version at startup.
166
        ltndLog.Infof("Version: %s commit=%s, build=%s, logging=%s, "+
4✔
167
                "debuglevel=%s", build.Version(), build.Commit,
4✔
168
                build.Deployment, build.LoggingType, cfg.DebugLevel)
4✔
169

4✔
170
        var network string
4✔
171
        switch {
4✔
172
        case cfg.Bitcoin.TestNet3:
×
173
                network = "testnet"
×
174

175
        case cfg.Bitcoin.MainNet:
×
176
                network = "mainnet"
×
177

178
        case cfg.Bitcoin.SimNet:
×
179
                network = "simnet"
×
180

181
        case cfg.Bitcoin.RegTest:
4✔
182
                network = "regtest"
4✔
183

184
        case cfg.Bitcoin.SigNet:
×
185
                network = "signet"
×
186
        }
187

188
        ltndLog.Infof("Active chain: %v (network=%v)",
4✔
189
                strings.Title(BitcoinChainName), network,
4✔
190
        )
4✔
191

4✔
192
        ctx := context.Background()
4✔
193
        ctx, cancel := context.WithCancel(ctx)
4✔
194
        defer cancel()
4✔
195

4✔
196
        // Enable http profiling server if requested.
4✔
197
        if cfg.Pprof.Profile != "" {
4✔
198
                // Create the http handler.
×
199
                pprofMux := http.NewServeMux()
×
200
                pprofMux.HandleFunc("/debug/pprof/", pprof.Index)
×
201
                pprofMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
×
202
                pprofMux.HandleFunc("/debug/pprof/profile", pprof.Profile)
×
203
                pprofMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
×
204
                pprofMux.HandleFunc("/debug/pprof/trace", pprof.Trace)
×
205

×
206
                if cfg.Pprof.BlockingProfile != 0 {
×
207
                        runtime.SetBlockProfileRate(cfg.Pprof.BlockingProfile)
×
208
                }
×
209
                if cfg.Pprof.MutexProfile != 0 {
×
210
                        runtime.SetMutexProfileFraction(cfg.Pprof.MutexProfile)
×
211
                }
×
212

213
                // Redirect all requests to the pprof handler, thus visiting
214
                // `127.0.0.1:6060` will be redirected to
215
                // `127.0.0.1:6060/debug/pprof`.
216
                pprofMux.Handle("/", http.RedirectHandler(
×
217
                        "/debug/pprof/", http.StatusSeeOther,
×
218
                ))
×
219

×
220
                ltndLog.Infof("Pprof listening on %v", cfg.Pprof.Profile)
×
221

×
222
                // Create the pprof server.
×
223
                pprofServer := &http.Server{
×
224
                        Addr:              cfg.Pprof.Profile,
×
225
                        Handler:           pprofMux,
×
226
                        ReadHeaderTimeout: cfg.HTTPHeaderTimeout,
×
227
                }
×
228

×
229
                // Shut the server down when lnd is shutting down.
×
230
                defer func() {
×
231
                        ltndLog.Info("Stopping pprof server...")
×
232
                        err := pprofServer.Shutdown(ctx)
×
233
                        if err != nil {
×
234
                                ltndLog.Errorf("Stop pprof server got err: %v",
×
235
                                        err)
×
236
                        }
×
237
                }()
238

239
                // Start the pprof server.
240
                go func() {
×
241
                        err := pprofServer.ListenAndServe()
×
242
                        if err != nil && !errors.Is(err, http.ErrServerClosed) {
×
243
                                ltndLog.Errorf("Serving pprof got err: %v", err)
×
244
                        }
×
245
                }()
246
        }
247

248
        // Write cpu profile if requested.
249
        if cfg.Pprof.CPUProfile != "" {
4✔
250
                f, err := os.Create(cfg.Pprof.CPUProfile)
×
251
                if err != nil {
×
252
                        return mkErr("unable to create CPU profile: %v", err)
×
253
                }
×
254
                _ = runtimePprof.StartCPUProfile(f)
×
255
                defer func() {
×
256
                        _ = f.Close()
×
257
                }()
×
258
                defer runtimePprof.StopCPUProfile()
×
259
        }
260

261
        // Run configuration dependent DB pre-initialization. Note that this
262
        // needs to be done early and once during the startup process, before
263
        // any DB access.
264
        if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
4✔
265
                return mkErr("error initializing DBs: %v", err)
×
266
        }
×
267

268
        tlsManagerCfg := &TLSManagerCfg{
4✔
269
                TLSCertPath:        cfg.TLSCertPath,
4✔
270
                TLSKeyPath:         cfg.TLSKeyPath,
4✔
271
                TLSEncryptKey:      cfg.TLSEncryptKey,
4✔
272
                TLSExtraIPs:        cfg.TLSExtraIPs,
4✔
273
                TLSExtraDomains:    cfg.TLSExtraDomains,
4✔
274
                TLSAutoRefresh:     cfg.TLSAutoRefresh,
4✔
275
                TLSDisableAutofill: cfg.TLSDisableAutofill,
4✔
276
                TLSCertDuration:    cfg.TLSCertDuration,
4✔
277

4✔
278
                LetsEncryptDir:    cfg.LetsEncryptDir,
4✔
279
                LetsEncryptDomain: cfg.LetsEncryptDomain,
4✔
280
                LetsEncryptListen: cfg.LetsEncryptListen,
4✔
281

4✔
282
                DisableRestTLS: cfg.DisableRestTLS,
4✔
283

4✔
284
                HTTPHeaderTimeout: cfg.HTTPHeaderTimeout,
4✔
285
        }
4✔
286
        tlsManager := NewTLSManager(tlsManagerCfg)
4✔
287
        serverOpts, restDialOpts, restListen, cleanUp,
4✔
288
                err := tlsManager.SetCertificateBeforeUnlock()
4✔
289
        if err != nil {
4✔
290
                return mkErr("error setting cert before unlock: %v", err)
×
291
        }
×
292
        if cleanUp != nil {
8✔
293
                defer cleanUp()
4✔
294
        }
4✔
295

296
        // If we have chosen to start with a dedicated listener for the
297
        // rpc server, we set it directly.
298
        grpcListeners := append([]*ListenerWithSignal{}, lisCfg.RPCListeners...)
4✔
299
        if len(grpcListeners) == 0 {
8✔
300
                // Otherwise we create listeners from the RPCListeners defined
4✔
301
                // in the config.
4✔
302
                for _, grpcEndpoint := range cfg.RPCListeners {
8✔
303
                        // Start a gRPC server listening for HTTP/2
4✔
304
                        // connections.
4✔
305
                        lis, err := lncfg.ListenOnAddress(grpcEndpoint)
4✔
306
                        if err != nil {
4✔
307
                                return mkErr("unable to listen on %s: %v",
×
308
                                        grpcEndpoint, err)
×
309
                        }
×
310
                        defer lis.Close()
4✔
311

4✔
312
                        grpcListeners = append(
4✔
313
                                grpcListeners, &ListenerWithSignal{
4✔
314
                                        Listener: lis,
4✔
315
                                        Ready:    make(chan struct{}),
4✔
316
                                },
4✔
317
                        )
4✔
318
                }
319
        }
320

321
        // Create a new RPC interceptor that we'll add to the GRPC server. This
322
        // will be used to log the API calls invoked on the GRPC server.
323
        interceptorChain := rpcperms.NewInterceptorChain(
4✔
324
                rpcsLog, cfg.NoMacaroons, cfg.RPCMiddleware.Mandatory,
4✔
325
        )
4✔
326
        if err := interceptorChain.Start(); err != nil {
4✔
327
                return mkErr("error starting interceptor chain: %v", err)
×
328
        }
×
329
        defer func() {
8✔
330
                err := interceptorChain.Stop()
4✔
331
                if err != nil {
4✔
332
                        ltndLog.Warnf("error stopping RPC interceptor "+
×
333
                                "chain: %v", err)
×
334
                }
×
335
        }()
336

337
        // Allow the user to overwrite some defaults of the gRPC library related
338
        // to connection keepalive (server side and client side pings).
339
        serverKeepalive := keepalive.ServerParameters{
4✔
340
                Time:    cfg.GRPC.ServerPingTime,
4✔
341
                Timeout: cfg.GRPC.ServerPingTimeout,
4✔
342
        }
4✔
343
        clientKeepalive := keepalive.EnforcementPolicy{
4✔
344
                MinTime:             cfg.GRPC.ClientPingMinWait,
4✔
345
                PermitWithoutStream: cfg.GRPC.ClientAllowPingWithoutStream,
4✔
346
        }
4✔
347

4✔
348
        rpcServerOpts := interceptorChain.CreateServerOpts()
4✔
349
        serverOpts = append(serverOpts, rpcServerOpts...)
4✔
350
        serverOpts = append(
4✔
351
                serverOpts, grpc.MaxRecvMsgSize(lnrpc.MaxGrpcMsgSize),
4✔
352
                grpc.KeepaliveParams(serverKeepalive),
4✔
353
                grpc.KeepaliveEnforcementPolicy(clientKeepalive),
4✔
354
        )
4✔
355

4✔
356
        grpcServer := grpc.NewServer(serverOpts...)
4✔
357
        defer grpcServer.Stop()
4✔
358

4✔
359
        // We'll also register the RPC interceptor chain as the StateServer, as
4✔
360
        // it can be used to query for the current state of the wallet.
4✔
361
        lnrpc.RegisterStateServer(grpcServer, interceptorChain)
4✔
362

4✔
363
        // Initialize, and register our implementation of the gRPC interface
4✔
364
        // exported by the rpcServer.
4✔
365
        rpcServer := newRPCServer(cfg, interceptorChain, implCfg, interceptor)
4✔
366
        err = rpcServer.RegisterWithGrpcServer(grpcServer)
4✔
367
        if err != nil {
4✔
368
                return mkErr("error registering gRPC server: %v", err)
×
369
        }
×
370

371
        // Now that both the WalletUnlocker and LightningService have been
372
        // registered with the GRPC server, we can start listening.
373
        err = startGrpcListen(cfg, grpcServer, grpcListeners)
4✔
374
        if err != nil {
4✔
375
                return mkErr("error starting gRPC listener: %v", err)
×
376
        }
×
377

378
        // Now start the REST proxy for our gRPC server above. We'll ensure
379
        // we direct LND to connect to its loopback address rather than a
380
        // wildcard to prevent certificate issues when accessing the proxy
381
        // externally.
382
        stopProxy, err := startRestProxy(
4✔
383
                cfg, rpcServer, restDialOpts, restListen,
4✔
384
        )
4✔
385
        if err != nil {
4✔
386
                return mkErr("error starting REST proxy: %v", err)
×
387
        }
×
388
        defer stopProxy()
4✔
389

4✔
390
        // Start leader election if we're running on etcd. Continuation will be
4✔
391
        // blocked until this instance is elected as the current leader or
4✔
392
        // shutting down.
4✔
393
        elected := false
4✔
394
        var leaderElector cluster.LeaderElector
4✔
395
        if cfg.Cluster.EnableLeaderElection {
4✔
396
                electionCtx, cancelElection := context.WithCancel(ctx)
×
397

×
398
                go func() {
×
399
                        <-interceptor.ShutdownChannel()
×
400
                        cancelElection()
×
401
                }()
×
402

403
                ltndLog.Infof("Using %v leader elector",
×
404
                        cfg.Cluster.LeaderElector)
×
405

×
406
                leaderElector, err = cfg.Cluster.MakeLeaderElector(
×
407
                        electionCtx, cfg.DB,
×
408
                )
×
409
                if err != nil {
×
410
                        return err
×
411
                }
×
412

413
                defer func() {
×
414
                        if !elected {
×
415
                                return
×
416
                        }
×
417

418
                        ltndLog.Infof("Attempting to resign from leader role "+
×
419
                                "(%v)", cfg.Cluster.ID)
×
420

×
421
                        // Ensure that we don't block the shutdown process if
×
422
                        // the leader resigning process takes too long. The
×
423
                        // cluster will ensure that the leader is not elected
×
424
                        // again until the previous leader has resigned or the
×
425
                        // leader election timeout has passed.
×
426
                        timeoutCtx, cancel := context.WithTimeout(
×
427
                                ctx, leaderResignTimeout,
×
428
                        )
×
429
                        defer cancel()
×
430

×
431
                        if err := leaderElector.Resign(timeoutCtx); err != nil {
×
432
                                ltndLog.Errorf("Leader elector failed to "+
×
433
                                        "resign: %v", err)
×
434
                        }
×
435
                }()
436

437
                ltndLog.Infof("Starting leadership campaign (%v)",
×
438
                        cfg.Cluster.ID)
×
439

×
440
                if err := leaderElector.Campaign(electionCtx); err != nil {
×
441
                        return mkErr("leadership campaign failed: %v", err)
×
442
                }
×
443

444
                elected = true
×
445
                ltndLog.Infof("Elected as leader (%v)", cfg.Cluster.ID)
×
446
        }
447

448
        dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
4✔
449
        switch {
4✔
450
        case err == channeldb.ErrDryRunMigrationOK:
×
451
                ltndLog.Infof("%v, exiting", err)
×
452
                return nil
×
453
        case err != nil:
×
454
                return mkErr("unable to open databases: %v", err)
×
455
        }
456

457
        defer cleanUp()
4✔
458

4✔
459
        partialChainControl, walletConfig, cleanUp, err := implCfg.BuildWalletConfig(
4✔
460
                ctx, dbs, &implCfg.AuxComponents, interceptorChain,
4✔
461
                grpcListeners,
4✔
462
        )
4✔
463
        if err != nil {
4✔
464
                return mkErr("error creating wallet config: %v", err)
×
465
        }
×
466

467
        defer cleanUp()
4✔
468

4✔
469
        activeChainControl, cleanUp, err := implCfg.BuildChainControl(
4✔
470
                partialChainControl, walletConfig,
4✔
471
        )
4✔
472
        if err != nil {
4✔
473
                return mkErr("error loading chain control: %v", err)
×
474
        }
×
475

476
        defer cleanUp()
4✔
477

4✔
478
        // Prepare the sub-servers, and insert the permissions required to
4✔
479
        // access them into the interceptor chain. Note that we do not yet have
4✔
480
        // all dependencies required to use all sub-servers, but we need be able
4✔
481
        // to allow a remote signer to connect to lnd before we can derive the
4✔
482
        // keys create the required dependencies.
4✔
483
        err = rpcServer.prepareSubServers(
4✔
484
                interceptorChain.MacaroonService(), cfg.SubRPCServers,
4✔
485
                activeChainControl,
4✔
486
        )
4✔
487
        if err != nil {
4✔
NEW
488
                return mkErr("error adding sub server permissions: %v", err)
×
NEW
489
        }
×
490

491
        defer func() {
8✔
492
                err := rpcServer.Stop()
4✔
493
                if err != nil {
4✔
NEW
494
                        ltndLog.Errorf("Error stopping the RPC server: %v", err)
×
NEW
495
                }
×
496
        }()
497

498
        // To ensure that a potential remote signer can connect to lnd before we
499
        // can handle other requests, we set the interceptor chain to be ready
500
        // to accept remote signer connections, if enabled by the cfg.
501
        if cfg.RemoteSigner.Enable &&
4✔
502
                cfg.RemoteSigner.SignerRole == lncfg.OutboundWatchOnlyRole {
8✔
503

4✔
504
                interceptorChain.SetAllowRemoteSigner()
4✔
505
        }
4✔
506

507
        // We'll wait until the wallet is fully ready to be used before we
508
        // proceed to derive keys from it.
509
        select {
4✔
510
        case err = <-activeChainControl.Wallet.WalletController.ReadySignal():
4✔
511
                if err != nil {
4✔
NEW
512
                        return mkErr("error when waiting for wallet to be "+
×
NEW
513
                                "ready: %v", err)
×
NEW
514
                }
×
515

NEW
516
        case <-interceptor.ShutdownChannel():
×
NEW
517
                // If we receive a shutdown signal while waiting for the wallet
×
NEW
518
                // to be ready, we must stop blocking so that all the deferred
×
NEW
519
                // clean up functions can be executed. That will also shutdown
×
NEW
520
                // the wallet.
×
NEW
521
                // We can't continue executing the code below, as we can't
×
NEW
522
                // generate any keys.
×
NEW
523
                return mkErr("Shutdown signal received while waiting for " +
×
NEW
524
                        "wallet to be ready.")
×
525
        }
526

527
        // TODO(roasbeef): add rotation
528
        idKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
4✔
529
                keychain.KeyLocator{
4✔
530
                        Family: keychain.KeyFamilyNodeKey,
4✔
531
                        Index:  0,
4✔
532
                },
4✔
533
        )
4✔
534
        if err != nil {
4✔
535
                return mkErr("error deriving node key: %v", err)
×
536
        }
×
537

538
        if cfg.Tor.StreamIsolation && cfg.Tor.SkipProxyForClearNetTargets {
4✔
539
                return errStreamIsolationWithProxySkip
×
540
        }
×
541

542
        if cfg.Tor.Active {
4✔
543
                if cfg.Tor.SkipProxyForClearNetTargets {
×
544
                        srvrLog.Info("Onion services are accessible via Tor! " +
×
545
                                "NOTE: Traffic to clearnet services is not " +
×
546
                                "routed via Tor.")
×
547
                } else {
×
548
                        srvrLog.Infof("Proxying all network traffic via Tor "+
×
549
                                "(stream_isolation=%v)! NOTE: Ensure the "+
×
550
                                "backend node is proxying over Tor as well",
×
551
                                cfg.Tor.StreamIsolation)
×
552
                }
×
553
        }
554

555
        // If tor is active and either v2 or v3 onion services have been
556
        // specified, make a tor controller and pass it into both the watchtower
557
        // server and the regular lnd server.
558
        var torController *tor.Controller
4✔
559
        if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) {
4✔
560
                torController = tor.NewController(
×
561
                        cfg.Tor.Control, cfg.Tor.TargetIPAddress,
×
562
                        cfg.Tor.Password,
×
563
                )
×
564

×
565
                // Start the tor controller before giving it to any other
×
566
                // subsystems.
×
567
                if err := torController.Start(); err != nil {
×
568
                        return mkErr("unable to initialize tor controller: %v",
×
569
                                err)
×
570
                }
×
571
                defer func() {
×
572
                        if err := torController.Stop(); err != nil {
×
573
                                ltndLog.Errorf("error stopping tor "+
×
574
                                        "controller: %v", err)
×
575
                        }
×
576
                }()
577
        }
578

579
        var tower *watchtower.Standalone
4✔
580
        if cfg.Watchtower.Active {
8✔
581
                towerKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
4✔
582
                        keychain.KeyLocator{
4✔
583
                                Family: keychain.KeyFamilyTowerID,
4✔
584
                                Index:  0,
4✔
585
                        },
4✔
586
                )
4✔
587
                if err != nil {
4✔
588
                        return mkErr("error deriving tower key: %v", err)
×
589
                }
×
590

591
                wtCfg := &watchtower.Config{
4✔
592
                        BlockFetcher:   activeChainControl.ChainIO,
4✔
593
                        DB:             dbs.TowerServerDB,
4✔
594
                        EpochRegistrar: activeChainControl.ChainNotifier,
4✔
595
                        Net:            cfg.net,
4✔
596
                        NewAddress: func() (btcutil.Address, error) {
4✔
597
                                return activeChainControl.Wallet.NewAddress(
×
598
                                        lnwallet.TaprootPubkey, false,
×
599
                                        lnwallet.DefaultAccountName,
×
600
                                )
×
601
                        },
×
602
                        NodeKeyECDH: keychain.NewPubKeyECDH(
603
                                towerKeyDesc, activeChainControl.KeyRing,
604
                        ),
605
                        PublishTx: activeChainControl.Wallet.PublishTransaction,
606
                        ChainHash: *cfg.ActiveNetParams.GenesisHash,
607
                }
608

609
                // If there is a tor controller (user wants auto hidden
610
                // services), then store a pointer in the watchtower config.
611
                if torController != nil {
4✔
612
                        wtCfg.TorController = torController
×
613
                        wtCfg.WatchtowerKeyPath = cfg.Tor.WatchtowerKeyPath
×
614
                        wtCfg.EncryptKey = cfg.Tor.EncryptKey
×
615
                        wtCfg.KeyRing = activeChainControl.KeyRing
×
616

×
617
                        switch {
×
618
                        case cfg.Tor.V2:
×
619
                                wtCfg.Type = tor.V2
×
620
                        case cfg.Tor.V3:
×
621
                                wtCfg.Type = tor.V3
×
622
                        }
623
                }
624

625
                wtConfig, err := cfg.Watchtower.Apply(
4✔
626
                        wtCfg, lncfg.NormalizeAddresses,
4✔
627
                )
4✔
628
                if err != nil {
4✔
629
                        return mkErr("unable to configure watchtower: %v", err)
×
630
                }
×
631

632
                tower, err = watchtower.New(wtConfig)
4✔
633
                if err != nil {
4✔
634
                        return mkErr("unable to create watchtower: %v", err)
×
635
                }
×
636
        }
637

638
        // Initialize the MultiplexAcceptor. If lnd was started with the
639
        // zero-conf feature bit, then this will be a ZeroConfAcceptor.
640
        // Otherwise, this will be a ChainedAcceptor.
641
        var multiAcceptor chanacceptor.MultiplexAcceptor
4✔
642
        if cfg.ProtocolOptions.ZeroConf() {
8✔
643
                multiAcceptor = chanacceptor.NewZeroConfAcceptor()
4✔
644
        } else {
8✔
645
                multiAcceptor = chanacceptor.NewChainedAcceptor()
4✔
646
        }
4✔
647

648
        // Set up the remote signer client. If the
649
        // cfg.RemoteSigner.SignerRole != lncfg.OutboundSignerRole, this remote
650
        // signer client won't run when the server starts.
651
        rscBuilder := rpcwallet.NewRemoteSignerClientBuilder(cfg.RemoteSigner)
4✔
652

4✔
653
        rsClient, err := rscBuilder.Build(rpcServer.subServers)
4✔
654
        if err != nil {
4✔
NEW
655
                return mkErr("unable to create remote signer client: %v", err)
×
NEW
656
        }
×
657

658
        // Set up the core server which will listen for incoming peer
659
        // connections.
660
        server, err := newServer(
4✔
661
                cfg, cfg.Listeners, dbs, activeChainControl, &idKeyDesc,
4✔
662
                activeChainControl.Cfg.WalletUnlockParams.ChansToRestore,
4✔
663
                multiAcceptor, torController, tlsManager, leaderElector,
4✔
664
                implCfg, rsClient,
4✔
665
        )
4✔
666
        if err != nil {
4✔
667
                return mkErr("unable to create server: %v", err)
×
668
        }
×
669

670
        // Set up an autopilot manager from the current config. This will be
671
        // used to manage the underlying autopilot agent, starting and stopping
672
        // it at will.
673
        atplCfg, err := initAutoPilot(
4✔
674
                server, cfg.Autopilot, activeChainControl.MinHtlcIn,
4✔
675
                cfg.ActiveNetParams,
4✔
676
        )
4✔
677
        if err != nil {
4✔
678
                return mkErr("unable to initialize autopilot: %v", err)
×
679
        }
×
680

681
        atplManager, err := autopilot.NewManager(atplCfg)
4✔
682
        if err != nil {
4✔
683
                return mkErr("unable to create autopilot manager: %v", err)
×
684
        }
×
685
        if err := atplManager.Start(); err != nil {
4✔
686
                return mkErr("unable to start autopilot manager: %v", err)
×
687
        }
×
688
        defer atplManager.Stop()
4✔
689

4✔
690
        err = tlsManager.LoadPermanentCertificate(activeChainControl.KeyRing)
4✔
691
        if err != nil {
4✔
692
                return mkErr("unable to load permanent TLS certificate: %v",
×
693
                        err)
×
694
        }
×
695

696
        // Now we have created all dependencies necessary to be able to use all
697
        // sub-servers, so we add the dependencies to the sub-servers.
698
        err = rpcServer.addDeps(
4✔
699
                server, interceptorChain.MacaroonService(), cfg.SubRPCServers,
4✔
700
                atplManager, server.invoices, tower, multiAcceptor,
4✔
701
                server.invoiceHtlcModifier,
4✔
702
        )
4✔
703
        if err != nil {
4✔
704
                return mkErr("unable to add deps to RPC server: %v", err)
×
705
        }
×
706

707
        // We transition the RPC state to Active, as the sub-servers are now
708
        // ready to be used.
709
        interceptorChain.SetRPCActive()
4✔
710

4✔
711
        if err := interceptor.Notifier.NotifyReady(true); err != nil {
4✔
712
                return mkErr("error notifying ready: %v", err)
×
713
        }
×
714

715
        // We'll wait until we're fully synced to continue the start up of the
716
        // remainder of the daemon. This ensures that we don't accept any
717
        // possibly invalid state transitions, or accept channels with spent
718
        // funds.
719
        _, bestHeight, err := activeChainControl.ChainIO.GetBestBlock()
4✔
720
        if err != nil {
4✔
721
                return mkErr("unable to determine chain tip: %v", err)
×
722
        }
×
723

724
        ltndLog.Infof("Waiting for chain backend to finish sync, "+
4✔
725
                "start_height=%v", bestHeight)
4✔
726

4✔
727
        type syncResult struct {
4✔
728
                synced        bool
4✔
729
                bestBlockTime int64
4✔
730
                err           error
4✔
731
        }
4✔
732

4✔
733
        var syncedResChan = make(chan syncResult, 1)
4✔
734

4✔
735
        for {
8✔
736
                // We check if the wallet is synced in a separate goroutine as
4✔
737
                // the call is blocking, and we want to be able to interrupt it
4✔
738
                // if the daemon is shutting down.
4✔
739
                go func() {
8✔
740
                        synced, bestBlockTime, err := activeChainControl.Wallet.
4✔
741
                                IsSynced()
4✔
742
                        syncedResChan <- syncResult{synced, bestBlockTime, err}
4✔
743
                }()
4✔
744

745
                select {
4✔
746
                case <-interceptor.ShutdownChannel():
×
747
                        return nil
×
748

749
                case res := <-syncedResChan:
4✔
750
                        if res.err != nil {
4✔
751
                                return mkErr("unable to determine if wallet "+
×
752
                                        "is synced: %v", res.err)
×
753
                        }
×
754

755
                        ltndLog.Debugf("Syncing to block timestamp: %v, is "+
4✔
756
                                "synced=%v", time.Unix(res.bestBlockTime, 0),
4✔
757
                                res.synced)
4✔
758

4✔
759
                        if res.synced {
8✔
760
                                break
4✔
761
                        }
762

763
                        // If we're not yet synced, we'll wait for a second
764
                        // before checking again.
765
                        select {
4✔
766
                        case <-interceptor.ShutdownChannel():
×
767
                                return nil
×
768

769
                        case <-time.After(time.Second):
4✔
770
                                continue
4✔
771
                        }
772
                }
773

774
                break
4✔
775
        }
776

777
        _, bestHeight, err = activeChainControl.ChainIO.GetBestBlock()
4✔
778
        if err != nil {
4✔
779
                return mkErr("unable to determine chain tip: %v", err)
×
780
        }
×
781

782
        ltndLog.Infof("Chain backend is fully synced (end_height=%v)!",
4✔
783
                bestHeight)
4✔
784

4✔
785
        // With all the relevant chains initialized, we can finally start the
4✔
786
        // server itself. We start the server in an asynchronous goroutine so
4✔
787
        // that we are able to interrupt and shutdown the daemon gracefully in
4✔
788
        // case the startup of the subservers do not behave as expected.
4✔
789
        errChan := make(chan error)
4✔
790
        go func() {
8✔
791
                errChan <- server.Start()
4✔
792
        }()
4✔
793

794
        defer func() {
8✔
795
                err := server.Stop()
4✔
796
                if err != nil {
4✔
797
                        ltndLog.Warnf("Stopping the server including all "+
×
798
                                "its subsystems failed with %v", err)
×
799
                }
×
800
        }()
801

802
        select {
4✔
803
        case err := <-errChan:
4✔
804
                if err == nil {
8✔
805
                        break
4✔
806
                }
807

808
                return mkErr("unable to start server: %v", err)
×
809

810
        case <-interceptor.ShutdownChannel():
×
811
                return nil
×
812
        }
813

814
        // We transition the server state to Active, as the server is up.
815
        interceptorChain.SetServerActive()
4✔
816

4✔
817
        // Now that the server has started, if the autopilot mode is currently
4✔
818
        // active, then we'll start the autopilot agent immediately. It will be
4✔
819
        // stopped together with the autopilot service.
4✔
820
        if cfg.Autopilot.Active {
4✔
821
                if err := atplManager.StartAgent(); err != nil {
×
822
                        return mkErr("unable to start autopilot agent: %v", err)
×
823
                }
×
824
        }
825

826
        if cfg.Watchtower.Active {
8✔
827
                if err := tower.Start(); err != nil {
4✔
828
                        return mkErr("unable to start watchtower: %v", err)
×
829
                }
×
830
                defer tower.Stop()
4✔
831
        }
832

833
        // Wait for shutdown signal from either a graceful server stop or from
834
        // the interrupt handler.
835
        <-interceptor.ShutdownChannel()
4✔
836
        return nil
4✔
837
}
838

839
// bakeMacaroon creates a new macaroon with newest version and the given
840
// permissions then returns it binary serialized.
841
func bakeMacaroon(ctx context.Context, svc *macaroons.Service,
842
        permissions []bakery.Op) ([]byte, error) {
4✔
843

4✔
844
        mac, err := svc.NewMacaroon(
4✔
845
                ctx, macaroons.DefaultRootKeyID, permissions...,
4✔
846
        )
4✔
847
        if err != nil {
4✔
848
                return nil, err
×
849
        }
×
850

851
        return mac.M().MarshalBinary()
4✔
852
}
853

854
// saveMacaroon bakes a macaroon with the specified macaroon permissions and
855
// writes it to a file with the given filename and file permissions.
856
func saveMacaroon(ctx context.Context, svc *macaroons.Service, filename string,
857
        macaroonPermissions []bakery.Op, filePermissions os.FileMode) error {
4✔
858

4✔
859
        macaroonBytes, err := bakeMacaroon(ctx, svc, macaroonPermissions)
4✔
860
        if err != nil {
4✔
861
                return err
×
862
        }
×
863
        err = os.WriteFile(filename, macaroonBytes, filePermissions)
4✔
864
        if err != nil {
4✔
865
                _ = os.Remove(filename)
×
866
                return err
×
867
        }
×
868

869
        return nil
4✔
870
}
871

872
// genDefaultMacaroons checks for three default macaroon files and generates
873
// them if they do not exist; one admin-level, one for invoice access and one
874
// read-only. Each macaroon is checked and created independently to ensure all
875
// three exist. The admin macaroon can also be used to generate more granular
876
// macaroons.
877
func genDefaultMacaroons(ctx context.Context, svc *macaroons.Service,
878
        admFile, roFile, invoiceFile string) error {
4✔
879

4✔
880
        // First, we'll generate a macaroon that only allows the caller to
4✔
881
        // access invoice related calls. This is useful for merchants and other
4✔
882
        // services to allow an isolated instance that can only query and
4✔
883
        // modify invoices.
4✔
884
        if !lnrpc.FileExists(invoiceFile) {
8✔
885
                err := saveMacaroon(
4✔
886
                        ctx, svc, invoiceFile, invoicePermissions, 0644,
4✔
887
                )
4✔
888
                if err != nil {
4✔
889
                        return err
×
890
                }
×
891
        }
892

893
        // Generate the read-only macaroon and write it to a file.
894
        if !lnrpc.FileExists(roFile) {
8✔
895
                err := saveMacaroon(
4✔
896
                        ctx, svc, roFile, readPermissions, 0644,
4✔
897
                )
4✔
898
                if err != nil {
4✔
899
                        return err
×
900
                }
×
901
        }
902

903
        // Generate the admin macaroon and write it to a file.
904
        if !lnrpc.FileExists(admFile) {
8✔
905
                err := saveMacaroon(
4✔
906
                        ctx, svc, admFile, adminPermissions(),
4✔
907
                        adminMacaroonFilePermissions,
4✔
908
                )
4✔
909
                if err != nil {
4✔
910
                        return err
×
911
                }
×
912
        }
913

914
        return nil
4✔
915
}
916

917
// adminPermissions returns a list of all permissions in a safe way that doesn't
918
// modify any of the source lists.
919
func adminPermissions() []bakery.Op {
4✔
920
        admin := make([]bakery.Op, len(readPermissions)+len(writePermissions))
4✔
921
        copy(admin[:len(readPermissions)], readPermissions)
4✔
922
        copy(admin[len(readPermissions):], writePermissions)
4✔
923
        return admin
4✔
924
}
4✔
925

926
// createWalletUnlockerService creates a WalletUnlockerService from the passed
927
// config.
928
func createWalletUnlockerService(cfg *Config) *walletunlocker.UnlockerService {
4✔
929
        // The macaroonFiles are passed to the wallet unlocker so they can be
4✔
930
        // deleted and recreated in case the root macaroon key is also changed
4✔
931
        // during the change password operation.
4✔
932
        macaroonFiles := []string{
4✔
933
                cfg.AdminMacPath, cfg.ReadMacPath, cfg.InvoiceMacPath,
4✔
934
        }
4✔
935

4✔
936
        return walletunlocker.New(
4✔
937
                cfg.ActiveNetParams.Params, macaroonFiles,
4✔
938
                cfg.ResetWalletTransactions, nil,
4✔
939
        )
4✔
940
}
4✔
941

942
// startGrpcListen starts the GRPC server on the passed listeners.
943
func startGrpcListen(cfg *Config, grpcServer *grpc.Server,
944
        listeners []*ListenerWithSignal) error {
4✔
945

4✔
946
        // Use a WaitGroup so we can be sure the instructions on how to input the
4✔
947
        // password is the last thing to be printed to the console.
4✔
948
        var wg sync.WaitGroup
4✔
949

4✔
950
        for _, lis := range listeners {
8✔
951
                wg.Add(1)
4✔
952
                go func(lis *ListenerWithSignal) {
8✔
953
                        rpcsLog.Infof("RPC server listening on %s", lis.Addr())
4✔
954

4✔
955
                        // Close the ready chan to indicate we are listening.
4✔
956
                        close(lis.Ready)
4✔
957

4✔
958
                        wg.Done()
4✔
959
                        _ = grpcServer.Serve(lis)
4✔
960
                }(lis)
4✔
961
        }
962

963
        // If Prometheus monitoring is enabled, start the Prometheus exporter.
964
        if cfg.Prometheus.Enabled() {
4✔
965
                err := monitoring.ExportPrometheusMetrics(
×
966
                        grpcServer, cfg.Prometheus,
×
967
                )
×
968
                if err != nil {
×
969
                        return err
×
970
                }
×
971
        }
972

973
        // Wait for gRPC servers to be up running.
974
        wg.Wait()
4✔
975

4✔
976
        return nil
4✔
977
}
978

979
// startRestProxy starts the given REST proxy on the listeners found in the
980
// config.
981
func startRestProxy(cfg *Config, rpcServer *rpcServer, restDialOpts []grpc.DialOption,
982
        restListen func(net.Addr) (net.Listener, error)) (func(), error) {
4✔
983

4✔
984
        // We use the first RPC listener as the destination for our REST proxy.
4✔
985
        // If the listener is set to listen on all interfaces, we replace it
4✔
986
        // with localhost, as we cannot dial it directly.
4✔
987
        restProxyDest := cfg.RPCListeners[0].String()
4✔
988
        switch {
4✔
989
        case strings.Contains(restProxyDest, "0.0.0.0"):
×
990
                restProxyDest = strings.Replace(
×
991
                        restProxyDest, "0.0.0.0", "127.0.0.1", 1,
×
992
                )
×
993

994
        case strings.Contains(restProxyDest, "[::]"):
×
995
                restProxyDest = strings.Replace(
×
996
                        restProxyDest, "[::]", "[::1]", 1,
×
997
                )
×
998
        }
999

1000
        var shutdownFuncs []func()
4✔
1001
        shutdown := func() {
8✔
1002
                for _, shutdownFn := range shutdownFuncs {
8✔
1003
                        shutdownFn()
4✔
1004
                }
4✔
1005
        }
1006

1007
        // Start a REST proxy for our gRPC server.
1008
        ctx := context.Background()
4✔
1009
        ctx, cancel := context.WithCancel(ctx)
4✔
1010
        shutdownFuncs = append(shutdownFuncs, cancel)
4✔
1011

4✔
1012
        // We'll set up a proxy that will forward REST calls to the GRPC
4✔
1013
        // server.
4✔
1014
        //
4✔
1015
        // The default JSON marshaler of the REST proxy only sets OrigName to
4✔
1016
        // true, which instructs it to use the same field names as specified in
4✔
1017
        // the proto file and not switch to camel case. What we also want is
4✔
1018
        // that the marshaler prints all values, even if they are falsey.
4✔
1019
        customMarshalerOption := proxy.WithMarshalerOption(
4✔
1020
                proxy.MIMEWildcard, &proxy.JSONPb{
4✔
1021
                        MarshalOptions:   *lnrpc.RESTJsonMarshalOpts,
4✔
1022
                        UnmarshalOptions: *lnrpc.RESTJsonUnmarshalOpts,
4✔
1023
                },
4✔
1024
        )
4✔
1025
        mux := proxy.NewServeMux(
4✔
1026
                customMarshalerOption,
4✔
1027

4✔
1028
                // Don't allow falling back to other HTTP methods, we want exact
4✔
1029
                // matches only. The actual method to be used can be overwritten
4✔
1030
                // by setting X-HTTP-Method-Override so there should be no
4✔
1031
                // reason for not specifying the correct method in the first
4✔
1032
                // place.
4✔
1033
                proxy.WithDisablePathLengthFallback(),
4✔
1034
        )
4✔
1035

4✔
1036
        // Register our services with the REST proxy.
4✔
1037
        err := rpcServer.RegisterWithRestProxy(
4✔
1038
                ctx, mux, restDialOpts, restProxyDest,
4✔
1039
        )
4✔
1040
        if err != nil {
4✔
1041
                return nil, err
×
1042
        }
×
1043

1044
        // Wrap the default grpc-gateway handler with the WebSocket handler.
1045
        restHandler := lnrpc.NewWebSocketProxy(
4✔
1046
                mux, rpcsLog, cfg.WSPingInterval, cfg.WSPongWait,
4✔
1047
                lnrpc.LndClientStreamingURIs,
4✔
1048
        )
4✔
1049

4✔
1050
        // Use a WaitGroup so we can be sure the instructions on how to input the
4✔
1051
        // password is the last thing to be printed to the console.
4✔
1052
        var wg sync.WaitGroup
4✔
1053

4✔
1054
        // Now spin up a network listener for each requested port and start a
4✔
1055
        // goroutine that serves REST with the created mux there.
4✔
1056
        for _, restEndpoint := range cfg.RESTListeners {
8✔
1057
                lis, err := restListen(restEndpoint)
4✔
1058
                if err != nil {
4✔
1059
                        ltndLog.Errorf("gRPC proxy unable to listen on %s",
×
1060
                                restEndpoint)
×
1061
                        return nil, err
×
1062
                }
×
1063

1064
                shutdownFuncs = append(shutdownFuncs, func() {
8✔
1065
                        err := lis.Close()
4✔
1066
                        if err != nil {
4✔
1067
                                rpcsLog.Errorf("Error closing listener: %v",
×
1068
                                        err)
×
1069
                        }
×
1070
                })
1071

1072
                wg.Add(1)
4✔
1073
                go func() {
8✔
1074
                        rpcsLog.Infof("gRPC proxy started at %s", lis.Addr())
4✔
1075

4✔
1076
                        // Create our proxy chain now. A request will pass
4✔
1077
                        // through the following chain:
4✔
1078
                        // req ---> CORS handler --> WS proxy --->
4✔
1079
                        //   REST proxy --> gRPC endpoint
4✔
1080
                        corsHandler := allowCORS(restHandler, cfg.RestCORS)
4✔
1081

4✔
1082
                        wg.Done()
4✔
1083
                        err := http.Serve(lis, corsHandler)
4✔
1084
                        if err != nil && !lnrpc.IsClosedConnError(err) {
4✔
1085
                                rpcsLog.Error(err)
×
1086
                        }
×
1087
                }()
1088
        }
1089

1090
        // Wait for REST servers to be up running.
1091
        wg.Wait()
4✔
1092

4✔
1093
        return shutdown, nil
4✔
1094
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc