lightningnetwork / lnd / 12585581807

02 Jan 2025 04:32PM UTC coverage: 58.678% (+0.08%) from 58.598%

Pull #9395 (github) by mohamedawnallah
multi: manage shutdown requests with status codes

In this commit, we manage shutdown requests with status codes:
lnd exits with code 1 for critical errors (e.g., exhausted
attempts to connect to the chain backend), and with code 0 for
normal shutdowns (e.g., the StopDaemon RPC call).

Co-authored-by: Elle Mouton <elle.mouton@gmail.com>
Pull Request #9395: multi: manage shutdown requests with status codes
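
The status-code scheme can be pictured with a minimal sketch. This is not lnd's actual entrypoint; the run helper below is a hypothetical stand-in for lnd.Main, which is assumed to return nil on a normal shutdown and a non-nil error otherwise:

package main

import (
        "fmt"
        "os"
)

// run is a hypothetical stand-in for lnd.Main: it blocks until the daemon
// shuts down and returns nil for a normal shutdown (e.g. one triggered by
// the StopDaemon RPC) or a non-nil error for a critical failure (e.g.
// exhausted attempts to connect to the chain backend).
func run() error {
        // ... start subsystems and wait for a shutdown request ...
        return nil
}

func main() {
        // Map the shutdown outcome to a process status code: 1 for
        // critical errors, 0 (the implicit default) for normal shutdowns.
        if err := run(); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
        }
}

Under that assumption, a StopDaemon-initiated shutdown leaves run returning nil and the process exiting with code 0, while a critical failure surfaces as an error and an exit with code 1.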

14 of 27 new or added lines in 5 files covered. (51.85%)

23 existing lines in 9 files now uncovered.

135172 of 230363 relevant lines covered (58.68%)

19162.69 hits per line

Source File

/lnd.go (58.22% covered)
1
// Copyright (c) 2013-2017 The btcsuite developers
2
// Copyright (c) 2015-2016 The Decred developers
3
// Copyright (C) 2015-2022 The Lightning Network Developers
4

5
package lnd
6

7
import (
8
        "context"
9
        "errors"
10
        "fmt"
11
        "log/slog"
12
        "net"
13
        "net/http"
14
        "net/http/pprof"
15
        "os"
16
        "runtime"
17
        runtimePprof "runtime/pprof"
18
        "strings"
19
        "sync"
20
        "time"
21

22
        "github.com/btcsuite/btcd/btcutil"
23
        proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
24
        "github.com/lightningnetwork/lnd/autopilot"
25
        "github.com/lightningnetwork/lnd/build"
26
        "github.com/lightningnetwork/lnd/chanacceptor"
27
        "github.com/lightningnetwork/lnd/channeldb"
28
        "github.com/lightningnetwork/lnd/cluster"
29
        "github.com/lightningnetwork/lnd/keychain"
30
        "github.com/lightningnetwork/lnd/lncfg"
31
        "github.com/lightningnetwork/lnd/lnrpc"
32
        "github.com/lightningnetwork/lnd/lnwallet"
33
        "github.com/lightningnetwork/lnd/macaroons"
34
        "github.com/lightningnetwork/lnd/monitoring"
35
        "github.com/lightningnetwork/lnd/rpcperms"
36
        "github.com/lightningnetwork/lnd/signal"
37
        "github.com/lightningnetwork/lnd/tor"
38
        "github.com/lightningnetwork/lnd/walletunlocker"
39
        "github.com/lightningnetwork/lnd/watchtower"
40
        "google.golang.org/grpc"
41
        "google.golang.org/grpc/credentials"
42
        "google.golang.org/grpc/keepalive"
43
        "gopkg.in/macaroon-bakery.v2/bakery"
44
        "gopkg.in/macaroon.v2"
45
)
46

47
const (
48
        // adminMacaroonFilePermissions is the file permission that is used for
49
        // creating the admin macaroon file.
50
        //
51
        // Why 640 is safe:
52
        // Assuming a reasonably secure Linux system, it will have a
53
        // separate group for each user. E.g. a new user lnd gets assigned group
54
        // lnd which nothing else belongs to. A system that does not do this is
55
        // inherently broken already.
56
        //
57
        // Since there is no other user in the group, no other user can read
58
        // admin macaroon unless the administrator explicitly allowed it. Thus
59
        // there's no harm allowing group read.
60
        adminMacaroonFilePermissions = 0640
61

62
        // leaderResignTimeout is the timeout used when resigning from the
63
        // leader role. This is kept short so LND can shut down quickly in case
64
        // of a system failure or network partition making the cluster
65
        // unresponsive. The cluster itself should ensure that the leader is not
66
        // elected again until the previous leader has resigned or the leader
67
        // election timeout has passed.
68
        leaderResignTimeout = 5 * time.Second
69
)
70

71
// AdminAuthOptions returns a list of DialOptions that can be used to
72
// authenticate with the RPC server with admin capabilities.
73
// skipMacaroons=true should be set if we don't want to include macaroons with
74
// the auth options. This is needed for instance for the WalletUnlocker
75
// service, which must be usable also before macaroons are created.
76
//
77
// NOTE: This should only be called after the RPCListener has signaled it is
78
// ready.
79
func AdminAuthOptions(cfg *Config, skipMacaroons bool) ([]grpc.DialOption,
80
        error) {
×
81

×
82
        creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "")
×
83
        if err != nil {
×
84
                return nil, fmt.Errorf("unable to read TLS cert: %w", err)
×
85
        }
×
86

87
        // Create a dial options array.
88
        opts := []grpc.DialOption{
×
89
                grpc.WithTransportCredentials(creds),
×
90
        }
×
91

×
92
        // Get the admin macaroon if macaroons are active.
×
93
        if !skipMacaroons && !cfg.NoMacaroons {
×
94
                // Load the admin macaroon file.
×
95
                macBytes, err := os.ReadFile(cfg.AdminMacPath)
×
96
                if err != nil {
×
97
                        return nil, fmt.Errorf("unable to read macaroon "+
×
98
                                "path (check the network setting!): %v", err)
×
99
                }
×
100

101
                mac := &macaroon.Macaroon{}
×
102
                if err = mac.UnmarshalBinary(macBytes); err != nil {
×
103
                        return nil, fmt.Errorf("unable to decode macaroon: %w",
×
104
                                err)
×
105
                }
×
106

107
                // Now we append the macaroon credentials to the dial options.
108
                cred, err := macaroons.NewMacaroonCredential(mac)
×
109
                if err != nil {
×
110
                        return nil, fmt.Errorf("error cloning mac: %w", err)
×
111
                }
×
112
                opts = append(opts, grpc.WithPerRPCCredentials(cred))
×
113
        }
114

115
        return opts, nil
×
116
}
117

118
// ListenerWithSignal is a net.Listener that has an additional Ready channel
119
// that will be closed when a server starts listening.
120
type ListenerWithSignal struct {
121
        net.Listener
122

123
        // Ready will be closed by the server listening on Listener.
124
        Ready chan struct{}
125

126
        // MacChan is an optional way to pass the admin macaroon to the program
127
        // that started lnd. The channel should be buffered to avoid lnd being
128
        // blocked on sending to the channel.
129
        MacChan chan []byte
130
}
131

132
// ListenerCfg is a wrapper around custom listeners that can be passed to lnd
133
// when calling its main method.
134
type ListenerCfg struct {
135
        // RPCListeners can be set to the listeners to use for the RPC server.
136
        // If empty a regular network listener will be created.
137
        RPCListeners []*ListenerWithSignal
138
}
139

140
var errStreamIsolationWithProxySkip = errors.New(
141
        "while stream isolation is enabled, the TOR proxy may not be skipped",
142
)
143

144
// Main is the true entry point for lnd. It accepts a fully populated and
145
// validated main configuration struct and an optional listener config struct.
146
// This function starts all main system components then blocks until a signal
147
// is received on the shutdownChan at which point everything is shut down again.
148
func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
149
        interceptor signal.Interceptor) error {
3✔
150

3✔
151
        defer func() {
6✔
152
                ltndLog.Info("Shutdown complete")
3✔
153
                err := cfg.LogRotator.Close()
3✔
154
                if err != nil {
3✔
155
                        ltndLog.Errorf("Could not close log rotator: %v", err)
×
156
                }
×
157
        }()
158

159
        ctx, cancel := context.WithCancel(context.Background())
3✔
160
        defer cancel()
3✔
161

3✔
162
        ctx, err := build.WithBuildInfo(ctx, cfg.LogConfig)
3✔
163
        if err != nil {
3✔
164
                return fmt.Errorf("unable to add build info to context: %w",
×
165
                        err)
×
166
        }
×
167

168
        mkErr := func(msg string, err error, attrs ...any) error {
3✔
169
                ltndLog.ErrorS(ctx, "Shutting down due to error in main "+
×
170
                        "method", err, attrs...)
×
171

×
172
                var (
×
173
                        params = []any{err}
×
174
                        fmtStr = msg + ": %w"
×
175
                )
×
176
                for _, attr := range attrs {
×
177
                        fmtStr += " %s"
×
178

×
179
                        params = append(params, attr)
×
180
                }
×
181

182
                return fmt.Errorf(fmtStr, params...)
×
183
        }
184

185
        // Show version at startup.
186
        ltndLog.InfoS(ctx, "Version Info",
3✔
187
                slog.String("version", build.Version()),
3✔
188
                slog.String("commit", build.Commit),
3✔
189
                slog.Any("debuglevel", build.Deployment),
3✔
190
                slog.String("logging", cfg.DebugLevel))
3✔
191

3✔
192
        var network string
3✔
193
        switch {
3✔
194
        case cfg.Bitcoin.TestNet3:
×
195
                network = "testnet"
×
196

197
        case cfg.Bitcoin.MainNet:
×
198
                network = "mainnet"
×
199

200
        case cfg.Bitcoin.SimNet:
×
201
                network = "simnet"
×
202

203
        case cfg.Bitcoin.RegTest:
3✔
204
                network = "regtest"
3✔
205

206
        case cfg.Bitcoin.SigNet:
×
207
                network = "signet"
×
208
        }
209

210
        ltndLog.InfoS(ctx, "Network Info",
3✔
211
                "active_chain", strings.Title(BitcoinChainName),
3✔
212
                "network", network)
3✔
213

3✔
214
        // Enable http profiling server if requested.
3✔
215
        if cfg.Pprof.Profile != "" {
3✔
216
                // Create the http handler.
×
217
                pprofMux := http.NewServeMux()
×
218
                pprofMux.HandleFunc("/debug/pprof/", pprof.Index)
×
219
                pprofMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
×
220
                pprofMux.HandleFunc("/debug/pprof/profile", pprof.Profile)
×
221
                pprofMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
×
222
                pprofMux.HandleFunc("/debug/pprof/trace", pprof.Trace)
×
223

×
224
                if cfg.Pprof.BlockingProfile != 0 {
×
225
                        runtime.SetBlockProfileRate(cfg.Pprof.BlockingProfile)
×
226
                }
×
227
                if cfg.Pprof.MutexProfile != 0 {
×
228
                        runtime.SetMutexProfileFraction(cfg.Pprof.MutexProfile)
×
229
                }
×
230

231
                // Redirect all requests to the pprof handler, thus visiting
232
                // `127.0.0.1:6060` will be redirected to
233
                // `127.0.0.1:6060/debug/pprof`.
234
                pprofMux.Handle("/", http.RedirectHandler(
×
235
                        "/debug/pprof/", http.StatusSeeOther,
×
236
                ))
×
237

×
238
                ltndLog.InfoS(ctx, "Pprof listening", "addr", cfg.Pprof.Profile)
×
239

×
240
                // Create the pprof server.
×
241
                pprofServer := &http.Server{
×
242
                        Addr:              cfg.Pprof.Profile,
×
243
                        Handler:           pprofMux,
×
244
                        ReadHeaderTimeout: cfg.HTTPHeaderTimeout,
×
245
                }
×
246

×
247
                // Shut the server down when lnd is shutting down.
×
248
                defer func() {
×
249
                        ltndLog.InfoS(ctx, "Stopping pprof server...")
×
250
                        err := pprofServer.Shutdown(ctx)
×
251
                        if err != nil {
×
252
                                ltndLog.ErrorS(ctx, "Stop pprof server", err)
×
253
                        }
×
254
                }()
255

256
                // Start the pprof server.
257
                go func() {
×
258
                        err := pprofServer.ListenAndServe()
×
259
                        if err != nil && !errors.Is(err, http.ErrServerClosed) {
×
260
                                ltndLog.ErrorS(ctx, "Could not serve pprof "+
×
261
                                        "server", err)
×
262
                        }
×
263
                }()
264
        }
265

266
        // Write cpu profile if requested.
267
        if cfg.Pprof.CPUProfile != "" {
3✔
268
                f, err := os.Create(cfg.Pprof.CPUProfile)
×
269
                if err != nil {
×
270
                        return mkErr("unable to create CPU profile", err)
×
271
                }
×
272
                _ = runtimePprof.StartCPUProfile(f)
×
273
                defer func() {
×
274
                        _ = f.Close()
×
275
                }()
×
276
                defer runtimePprof.StopCPUProfile()
×
277
        }
278

279
        // Run configuration dependent DB pre-initialization. Note that this
280
        // needs to be done early and once during the startup process, before
281
        // any DB access.
282
        if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
3✔
283
                return mkErr("error initializing DBs", err)
×
284
        }
×
285

286
        tlsManagerCfg := &TLSManagerCfg{
3✔
287
                TLSCertPath:        cfg.TLSCertPath,
3✔
288
                TLSKeyPath:         cfg.TLSKeyPath,
3✔
289
                TLSEncryptKey:      cfg.TLSEncryptKey,
3✔
290
                TLSExtraIPs:        cfg.TLSExtraIPs,
3✔
291
                TLSExtraDomains:    cfg.TLSExtraDomains,
3✔
292
                TLSAutoRefresh:     cfg.TLSAutoRefresh,
3✔
293
                TLSDisableAutofill: cfg.TLSDisableAutofill,
3✔
294
                TLSCertDuration:    cfg.TLSCertDuration,
3✔
295

3✔
296
                LetsEncryptDir:    cfg.LetsEncryptDir,
3✔
297
                LetsEncryptDomain: cfg.LetsEncryptDomain,
3✔
298
                LetsEncryptListen: cfg.LetsEncryptListen,
3✔
299

3✔
300
                DisableRestTLS: cfg.DisableRestTLS,
3✔
301

3✔
302
                HTTPHeaderTimeout: cfg.HTTPHeaderTimeout,
3✔
303
        }
3✔
304
        tlsManager := NewTLSManager(tlsManagerCfg)
3✔
305
        serverOpts, restDialOpts, restListen, cleanUp,
3✔
306
                err := tlsManager.SetCertificateBeforeUnlock()
3✔
307
        if err != nil {
3✔
308
                return mkErr("error setting cert before unlock", err)
×
309
        }
×
310
        if cleanUp != nil {
6✔
311
                defer cleanUp()
3✔
312
        }
3✔
313

314
        // If we have chosen to start with a dedicated listener for the
315
        // rpc server, we set it directly.
316
        grpcListeners := append([]*ListenerWithSignal{}, lisCfg.RPCListeners...)
3✔
317
        if len(grpcListeners) == 0 {
6✔
318
                // Otherwise we create listeners from the RPCListeners defined
3✔
319
                // in the config.
3✔
320
                for _, grpcEndpoint := range cfg.RPCListeners {
6✔
321
                        // Start a gRPC server listening for HTTP/2
3✔
322
                        // connections.
3✔
323
                        lis, err := lncfg.ListenOnAddress(grpcEndpoint)
3✔
324
                        if err != nil {
3✔
325
                                return mkErr("unable to listen on grpc "+
×
326
                                        "endpoint", err,
×
327
                                        slog.String(
×
328
                                                "endpoint",
×
329
                                                grpcEndpoint.String(),
×
330
                                        ))
×
331
                        }
×
332
                        defer lis.Close()
3✔
333

3✔
334
                        grpcListeners = append(
3✔
335
                                grpcListeners, &ListenerWithSignal{
3✔
336
                                        Listener: lis,
3✔
337
                                        Ready:    make(chan struct{}),
3✔
338
                                },
3✔
339
                        )
3✔
340
                }
341
        }
342

343
        // Create a new RPC interceptor that we'll add to the GRPC server. This
344
        // will be used to log the API calls invoked on the GRPC server.
345
        interceptorChain := rpcperms.NewInterceptorChain(
3✔
346
                rpcsLog, cfg.NoMacaroons, cfg.RPCMiddleware.Mandatory,
3✔
347
        )
3✔
348
        if err := interceptorChain.Start(); err != nil {
3✔
349
                return mkErr("error starting interceptor chain", err)
×
350
        }
×
351
        defer func() {
6✔
352
                err := interceptorChain.Stop()
3✔
353
                if err != nil {
3✔
354
                        ltndLog.Warnf("error stopping RPC interceptor "+
×
355
                                "chain: %v", err)
×
356
                }
×
357
        }()
358

359
        // Allow the user to overwrite some defaults of the gRPC library related
360
        // to connection keepalive (server side and client side pings).
361
        serverKeepalive := keepalive.ServerParameters{
3✔
362
                Time:    cfg.GRPC.ServerPingTime,
3✔
363
                Timeout: cfg.GRPC.ServerPingTimeout,
3✔
364
        }
3✔
365
        clientKeepalive := keepalive.EnforcementPolicy{
3✔
366
                MinTime:             cfg.GRPC.ClientPingMinWait,
3✔
367
                PermitWithoutStream: cfg.GRPC.ClientAllowPingWithoutStream,
3✔
368
        }
3✔
369

3✔
370
        rpcServerOpts := interceptorChain.CreateServerOpts()
3✔
371
        serverOpts = append(serverOpts, rpcServerOpts...)
3✔
372
        serverOpts = append(
3✔
373
                serverOpts, grpc.MaxRecvMsgSize(lnrpc.MaxGrpcMsgSize),
3✔
374
                grpc.KeepaliveParams(serverKeepalive),
3✔
375
                grpc.KeepaliveEnforcementPolicy(clientKeepalive),
3✔
376
        )
3✔
377

3✔
378
        grpcServer := grpc.NewServer(serverOpts...)
3✔
379
        defer grpcServer.Stop()
3✔
380

3✔
381
        // We'll also register the RPC interceptor chain as the StateServer, as
3✔
382
        // it can be used to query for the current state of the wallet.
3✔
383
        lnrpc.RegisterStateServer(grpcServer, interceptorChain)
3✔
384

3✔
385
        // Initialize, and register our implementation of the gRPC interface
3✔
386
        // exported by the rpcServer.
3✔
387
        rpcServer := newRPCServer(cfg, interceptorChain, implCfg, interceptor)
3✔
388
        err = rpcServer.RegisterWithGrpcServer(grpcServer)
3✔
389
        if err != nil {
3✔
390
                return mkErr("error registering gRPC server", err)
×
391
        }
×
392

393
        // Now that both the WalletUnlocker and LightningService have been
394
        // registered with the GRPC server, we can start listening.
395
        err = startGrpcListen(cfg, grpcServer, grpcListeners)
3✔
396
        if err != nil {
3✔
397
                return mkErr("error starting gRPC listener", err)
×
398
        }
×
399

400
        // Now start the REST proxy for our gRPC server above. We'll ensure
401
        // we direct LND to connect to its loopback address rather than a
402
        // wildcard to prevent certificate issues when accessing the proxy
403
        // externally.
404
        stopProxy, err := startRestProxy(
3✔
405
                ctx, cfg, rpcServer, restDialOpts, restListen,
3✔
406
        )
3✔
407
        if err != nil {
3✔
408
                return mkErr("error starting REST proxy", err)
×
409
        }
×
410
        defer stopProxy()
3✔
411

3✔
412
        // Start leader election if we're running on etcd. Continuation will be
3✔
413
        // blocked until this instance is elected as the current leader or
3✔
414
        // shutting down.
3✔
415
        elected, leaderNormalShutdown := false, false
3✔
416
        var leaderElector cluster.LeaderElector
3✔
417
        if cfg.Cluster.EnableLeaderElection {
3✔
418
                electionCtx, cancelElection := context.WithCancel(ctx)
×
419

×
420
                go func() {
×
NEW
421
                        leaderNormalShutdown = <-interceptor.ShutdownChannel()
×
422
                        cancelElection()
×
423
                }()
×
424

425
                ltndLog.InfoS(ctx, "Using leader elector",
×
426
                        "elector", cfg.Cluster.LeaderElector)
×
427

×
428
                leaderElector, err = cfg.Cluster.MakeLeaderElector(
×
429
                        electionCtx, cfg.DB,
×
430
                )
×
431
                if err != nil {
×
432
                        return err
×
433
                }
×
434

435
                defer func() {
×
436
                        if !elected {
×
437
                                return
×
438
                        }
×
439

440
                        ltndLog.InfoS(ctx, "Attempting to resign from "+
×
441
                                "leader role", "cluster_id", cfg.Cluster.ID)
×
442

×
443
                        // Ensure that we don't block the shutdown process if
×
444
                        // the leader resigning process takes too long. The
×
445
                        // cluster will ensure that the leader is not elected
×
446
                        // again until the previous leader has resigned or the
×
447
                        // leader election timeout has passed.
×
448
                        timeoutCtx, cancel := context.WithTimeout(
×
449
                                ctx, leaderResignTimeout,
×
450
                        )
×
451
                        defer cancel()
×
452

×
453
                        if err := leaderElector.Resign(timeoutCtx); err != nil {
×
454
                                ltndLog.Errorf("Leader elector failed to "+
×
455
                                        "resign: %v", err)
×
456
                        }
×
457
                }()
458

459
                ltndLog.InfoS(ctx, "Starting leadership campaign",
×
460
                        "cluster_id", cfg.Cluster.ID)
×
461

×
462
                if err := leaderElector.Campaign(electionCtx); err != nil {
×
463
                        return mkErr("leadership campaign failed", err)
×
464
                }
×
465

466
                elected = true
×
467
                ltndLog.InfoS(ctx, "Elected as leader",
×
468
                        "cluster_id", cfg.Cluster.ID)
×
469
        }
470

471
        dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
3✔
472
        switch {
3✔
473
        case errors.Is(err, channeldb.ErrDryRunMigrationOK):
×
474
                ltndLog.InfoS(ctx, "Exiting due to BuildDatabase error",
×
475
                        slog.Any("err", err))
×
476
                return nil
×
477
        case err != nil:
×
478
                return mkErr("unable to open databases", err)
×
479
        }
480

481
        defer cleanUp()
3✔
482

3✔
483
        partialChainControl, walletConfig, cleanUp, err := implCfg.BuildWalletConfig(
3✔
484
                ctx, dbs, &implCfg.AuxComponents, interceptorChain,
3✔
485
                grpcListeners,
3✔
486
        )
3✔
487
        if err != nil {
3✔
488
                return mkErr("error creating wallet config", err)
×
489
        }
×
490

491
        defer cleanUp()
3✔
492

3✔
493
        activeChainControl, cleanUp, err := implCfg.BuildChainControl(
3✔
494
                partialChainControl, walletConfig,
3✔
495
        )
3✔
496
        if err != nil {
3✔
497
                return mkErr("error loading chain control", err)
×
498
        }
×
499

500
        defer cleanUp()
3✔
501

3✔
502
        // TODO(roasbeef): add rotation
3✔
503
        idKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
3✔
504
                keychain.KeyLocator{
3✔
505
                        Family: keychain.KeyFamilyNodeKey,
3✔
506
                        Index:  0,
3✔
507
                },
3✔
508
        )
3✔
509
        if err != nil {
3✔
510
                return mkErr("error deriving node key", err)
×
511
        }
×
512

513
        if cfg.Tor.StreamIsolation && cfg.Tor.SkipProxyForClearNetTargets {
3✔
514
                return errStreamIsolationWithProxySkip
×
515
        }
×
516

517
        if cfg.Tor.Active {
3✔
518
                if cfg.Tor.SkipProxyForClearNetTargets {
×
519
                        srvrLog.InfoS(ctx, "Onion services are accessible "+
×
520
                                "via Tor! NOTE: Traffic to clearnet services "+
×
521
                                "is not routed via Tor.")
×
522
                } else {
×
523
                        srvrLog.InfoS(ctx, "Proxying all network traffic "+
×
524
                                "via Tor! NOTE: Ensure the backend node is "+
×
525
                                "proxying over Tor as well",
×
526
                                "stream_isolation", cfg.Tor.StreamIsolation)
×
527
                }
×
528
        }
529

530
        // If tor is active and either v2 or v3 onion services have been
531
        // specified, make a tor controller and pass it into both the watchtower
532
        // server and the regular lnd server.
533
        var torController *tor.Controller
3✔
534
        if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) {
3✔
535
                torController = tor.NewController(
×
536
                        cfg.Tor.Control, cfg.Tor.TargetIPAddress,
×
537
                        cfg.Tor.Password,
×
538
                )
×
539

×
540
                // Start the tor controller before giving it to any other
×
541
                // subsystems.
×
542
                if err := torController.Start(); err != nil {
×
543
                        return mkErr("unable to initialize tor controller",
×
544
                                err)
×
545
                }
×
546
                defer func() {
×
547
                        if err := torController.Stop(); err != nil {
×
548
                                ltndLog.ErrorS(ctx, "Error stopping tor "+
×
549
                                        "controller", err)
×
550
                        }
×
551
                }()
552
        }
553

554
        var tower *watchtower.Standalone
3✔
555
        if cfg.Watchtower.Active {
6✔
556
                towerKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
3✔
557
                        keychain.KeyLocator{
3✔
558
                                Family: keychain.KeyFamilyTowerID,
3✔
559
                                Index:  0,
3✔
560
                        },
3✔
561
                )
3✔
562
                if err != nil {
3✔
563
                        return mkErr("error deriving tower key", err)
×
564
                }
×
565

566
                wtCfg := &watchtower.Config{
3✔
567
                        BlockFetcher:   activeChainControl.ChainIO,
3✔
568
                        DB:             dbs.TowerServerDB,
3✔
569
                        EpochRegistrar: activeChainControl.ChainNotifier,
3✔
570
                        Net:            cfg.net,
3✔
571
                        NewAddress: func() (btcutil.Address, error) {
3✔
572
                                return activeChainControl.Wallet.NewAddress(
×
573
                                        lnwallet.TaprootPubkey, false,
×
574
                                        lnwallet.DefaultAccountName,
×
575
                                )
×
576
                        },
×
577
                        NodeKeyECDH: keychain.NewPubKeyECDH(
578
                                towerKeyDesc, activeChainControl.KeyRing,
579
                        ),
580
                        PublishTx: activeChainControl.Wallet.PublishTransaction,
581
                        ChainHash: *cfg.ActiveNetParams.GenesisHash,
582
                }
583

584
                // If there is a tor controller (user wants auto hidden
585
                // services), then store a pointer in the watchtower config.
586
                if torController != nil {
3✔
587
                        wtCfg.TorController = torController
×
588
                        wtCfg.WatchtowerKeyPath = cfg.Tor.WatchtowerKeyPath
×
589
                        wtCfg.EncryptKey = cfg.Tor.EncryptKey
×
590
                        wtCfg.KeyRing = activeChainControl.KeyRing
×
591

×
592
                        switch {
×
593
                        case cfg.Tor.V2:
×
594
                                wtCfg.Type = tor.V2
×
595
                        case cfg.Tor.V3:
×
596
                                wtCfg.Type = tor.V3
×
597
                        }
598
                }
599

600
                wtConfig, err := cfg.Watchtower.Apply(
3✔
601
                        wtCfg, lncfg.NormalizeAddresses,
3✔
602
                )
3✔
603
                if err != nil {
3✔
604
                        return mkErr("unable to configure watchtower", err)
×
605
                }
×
606

607
                tower, err = watchtower.New(wtConfig)
3✔
608
                if err != nil {
3✔
609
                        return mkErr("unable to create watchtower", err)
×
610
                }
×
611
        }
612

613
        // Initialize the MultiplexAcceptor. If lnd was started with the
614
        // zero-conf feature bit, then this will be a ZeroConfAcceptor.
615
        // Otherwise, this will be a ChainedAcceptor.
616
        var multiAcceptor chanacceptor.MultiplexAcceptor
3✔
617
        if cfg.ProtocolOptions.ZeroConf() {
6✔
618
                multiAcceptor = chanacceptor.NewZeroConfAcceptor()
3✔
619
        } else {
6✔
620
                multiAcceptor = chanacceptor.NewChainedAcceptor()
3✔
621
        }
3✔
622

623
        // Set up the core server which will listen for incoming peer
624
        // connections.
625
        server, err := newServer(
3✔
626
                cfg, cfg.Listeners, dbs, activeChainControl, &idKeyDesc,
3✔
627
                activeChainControl.Cfg.WalletUnlockParams.ChansToRestore,
3✔
628
                multiAcceptor, torController, tlsManager, leaderElector,
3✔
629
                implCfg,
3✔
630
        )
3✔
631
        if err != nil {
3✔
632
                return mkErr("unable to create server", err)
×
633
        }
×
634

635
        // Set up an autopilot manager from the current config. This will be
636
        // used to manage the underlying autopilot agent, starting and stopping
637
        // it at will.
638
        atplCfg, err := initAutoPilot(
3✔
639
                server, cfg.Autopilot, activeChainControl.MinHtlcIn,
3✔
640
                cfg.ActiveNetParams,
3✔
641
        )
3✔
642
        if err != nil {
3✔
643
                return mkErr("unable to initialize autopilot", err)
×
644
        }
×
645

646
        atplManager, err := autopilot.NewManager(atplCfg)
3✔
647
        if err != nil {
3✔
648
                return mkErr("unable to create autopilot manager", err)
×
649
        }
×
650
        if err := atplManager.Start(); err != nil {
3✔
651
                return mkErr("unable to start autopilot manager", err)
×
652
        }
×
653
        defer atplManager.Stop()
3✔
654

3✔
655
        err = tlsManager.LoadPermanentCertificate(activeChainControl.KeyRing)
3✔
656
        if err != nil {
3✔
657
                return mkErr("unable to load permanent TLS certificate", err)
×
658
        }
×
659

660
        // Now we have created all dependencies necessary to populate and
661
        // start the RPC server.
662
        err = rpcServer.addDeps(
3✔
663
                server, interceptorChain.MacaroonService(), cfg.SubRPCServers,
3✔
664
                atplManager, server.invoices, tower, multiAcceptor,
3✔
665
                server.invoiceHtlcModifier,
3✔
666
        )
3✔
667
        if err != nil {
3✔
668
                return mkErr("unable to add deps to RPC server", err)
×
669
        }
×
670
        if err := rpcServer.Start(); err != nil {
3✔
671
                return mkErr("unable to start RPC server", err)
×
672
        }
×
673
        defer rpcServer.Stop()
3✔
674

3✔
675
        // We transition the RPC state to Active, as the RPC server is up.
3✔
676
        interceptorChain.SetRPCActive()
3✔
677

3✔
678
        if err := interceptor.Notifier.NotifyReady(true); err != nil {
3✔
679
                return mkErr("error notifying ready", err)
×
680
        }
×
681

682
        // We'll wait until we're fully synced to continue the start up of the
683
        // remainder of the daemon. This ensures that we don't accept any
684
        // possibly invalid state transitions, or accept channels with spent
685
        // funds.
686
        _, bestHeight, err := activeChainControl.ChainIO.GetBestBlock()
3✔
687
        if err != nil {
3✔
688
                return mkErr("unable to determine chain tip", err)
×
689
        }
×
690

691
        ltndLog.InfoS(ctx, "Waiting for chain backend to finish sync",
3✔
692
                slog.Int64("start_height", int64(bestHeight)))
3✔
693

3✔
694
        type syncResult struct {
3✔
695
                synced        bool
3✔
696
                bestBlockTime int64
3✔
697
                err           error
3✔
698
        }
3✔
699

3✔
700
        var syncedResChan = make(chan syncResult, 1)
3✔
701

3✔
702
        for {
6✔
703
                // We check if the wallet is synced in a separate goroutine as
3✔
704
                // the call is blocking, and we want to be able to interrupt it
3✔
705
                // if the daemon is shutting down.
3✔
706
                go func() {
6✔
707
                        synced, bestBlockTime, err := activeChainControl.Wallet.
3✔
708
                                IsSynced()
3✔
709
                        syncedResChan <- syncResult{synced, bestBlockTime, err}
3✔
710
                }()
3✔
711

712
                select {
3✔
713
                case <-interceptor.ShutdownChannel():
×
714
                        return nil
×
715

716
                case res := <-syncedResChan:
3✔
717
                        if res.err != nil {
3✔
718
                                return mkErr("unable to determine if wallet "+
×
719
                                        "is synced", res.err)
×
720
                        }
×
721

722
                        ltndLog.DebugS(ctx, "Syncing to block chain",
3✔
723
                                "best_block_time", time.Unix(res.bestBlockTime, 0),
3✔
724
                                "is_synced", res.synced)
3✔
725

3✔
726
                        if res.synced {
6✔
727
                                break
3✔
728
                        }
729

730
                        // If we're not yet synced, we'll wait for a second
731
                        // before checking again.
732
                        select {
3✔
733
                        case <-interceptor.ShutdownChannel():
×
734
                                return nil
×
735

736
                        case <-time.After(time.Second):
3✔
737
                                continue
3✔
738
                        }
739
                }
740

741
                break
3✔
742
        }
743

744
        _, bestHeight, err = activeChainControl.ChainIO.GetBestBlock()
3✔
745
        if err != nil {
3✔
746
                return mkErr("unable to determine chain tip", err)
×
747
        }
×
748

749
        ltndLog.InfoS(ctx, "Chain backend is fully synced!",
3✔
750
                "end_height", bestHeight)
3✔
751

3✔
752
        // With all the relevant chains initialized, we can finally start the
3✔
753
        // server itself. We start the server in an asynchronous goroutine so
3✔
754
        // that we are able to interrupt and shutdown the daemon gracefully in
3✔
755
        // case the startup of the subservers do not behave as expected.
3✔
756
        errChan := make(chan error)
3✔
757
        go func() {
6✔
758
                errChan <- server.Start()
3✔
759
        }()
3✔
760

761
        defer func() {
6✔
762
                err := server.Stop()
3✔
763
                if err != nil {
3✔
764
                        ltndLog.WarnS(ctx, "Stopping the server including all "+
×
765
                                "its subsystems failed with", err)
×
766
                }
×
767
        }()
768

769
        select {
3✔
770
        case err := <-errChan:
3✔
771
                if err == nil {
6✔
772
                        break
3✔
773
                }
774

775
                return mkErr("unable to start server", err)
×
776

777
        case <-interceptor.ShutdownChannel():
×
778
                return nil
×
779
        }
780

781
        // We transition the server state to Active, as the server is up.
782
        interceptorChain.SetServerActive()
3✔
783

3✔
784
        // Now that the server has started, if the autopilot mode is currently
3✔
785
        // active, then we'll start the autopilot agent immediately. It will be
3✔
786
        // stopped together with the autopilot service.
3✔
787
        if cfg.Autopilot.Active {
3✔
788
                if err := atplManager.StartAgent(); err != nil {
×
789
                        return mkErr("unable to start autopilot agent", err)
×
790
                }
×
791
        }
792

793
        if cfg.Watchtower.Active {
6✔
794
                if err := tower.Start(); err != nil {
3✔
795
                        return mkErr("unable to start watchtower", err)
×
796
                }
×
797
                defer tower.Stop()
3✔
798
        }
799

800
        // Wait for shutdown signal from either a graceful server stop or from
801
        // the interrupt handler.
802
        normalShutdown := <-interceptor.ShutdownChannel()
3✔
803
        if !(normalShutdown || leaderNormalShutdown) {
3✔
NEW
804
                return errors.New("LND shut down with an error")
×
NEW
805
        }
×
806

807
        return nil
3✔
808
}
809

810
// bakeMacaroon creates a new macaroon with newest version and the given
811
// permissions then returns it binary serialized.
812
func bakeMacaroon(ctx context.Context, svc *macaroons.Service,
813
        permissions []bakery.Op) ([]byte, error) {
3✔
814

3✔
815
        mac, err := svc.NewMacaroon(
3✔
816
                ctx, macaroons.DefaultRootKeyID, permissions...,
3✔
817
        )
3✔
818
        if err != nil {
3✔
819
                return nil, err
×
820
        }
×
821

822
        return mac.M().MarshalBinary()
3✔
823
}
824

825
// saveMacaroon bakes a macaroon with the specified macaroon permissions and
826
// writes it to a file with the given filename and file permissions.
827
func saveMacaroon(ctx context.Context, svc *macaroons.Service, filename string,
828
        macaroonPermissions []bakery.Op, filePermissions os.FileMode) error {
3✔
829

3✔
830
        macaroonBytes, err := bakeMacaroon(ctx, svc, macaroonPermissions)
3✔
831
        if err != nil {
3✔
832
                return err
×
833
        }
×
834
        err = os.WriteFile(filename, macaroonBytes, filePermissions)
3✔
835
        if err != nil {
3✔
836
                _ = os.Remove(filename)
×
837
                return err
×
838
        }
×
839

840
        return nil
3✔
841
}
842

843
// genDefaultMacaroons checks for three default macaroon files and generates
844
// them if they do not exist; one admin-level, one for invoice access and one
845
// read-only. Each macaroon is checked and created independently to ensure all
846
// three exist. The admin macaroon can also be used to generate more granular
847
// macaroons.
848
func genDefaultMacaroons(ctx context.Context, svc *macaroons.Service,
849
        admFile, roFile, invoiceFile string) error {
3✔
850

3✔
851
        // First, we'll generate a macaroon that only allows the caller to
3✔
852
        // access invoice related calls. This is useful for merchants and other
3✔
853
        // services to allow an isolated instance that can only query and
3✔
854
        // modify invoices.
3✔
855
        if !lnrpc.FileExists(invoiceFile) {
6✔
856
                err := saveMacaroon(
3✔
857
                        ctx, svc, invoiceFile, invoicePermissions, 0644,
3✔
858
                )
3✔
859
                if err != nil {
3✔
860
                        return err
×
861
                }
×
862
        }
863

864
        // Generate the read-only macaroon and write it to a file.
865
        if !lnrpc.FileExists(roFile) {
6✔
866
                err := saveMacaroon(
3✔
867
                        ctx, svc, roFile, readPermissions, 0644,
3✔
868
                )
3✔
869
                if err != nil {
3✔
870
                        return err
×
871
                }
×
872
        }
873

874
        // Generate the admin macaroon and write it to a file.
875
        if !lnrpc.FileExists(admFile) {
6✔
876
                err := saveMacaroon(
3✔
877
                        ctx, svc, admFile, adminPermissions(),
3✔
878
                        adminMacaroonFilePermissions,
3✔
879
                )
3✔
880
                if err != nil {
3✔
881
                        return err
×
882
                }
×
883
        }
884

885
        return nil
3✔
886
}
887

888
// adminPermissions returns a list of all permissions in a safe way that doesn't
889
// modify any of the source lists.
890
func adminPermissions() []bakery.Op {
3✔
891
        admin := make([]bakery.Op, len(readPermissions)+len(writePermissions))
3✔
892
        copy(admin[:len(readPermissions)], readPermissions)
3✔
893
        copy(admin[len(readPermissions):], writePermissions)
3✔
894
        return admin
3✔
895
}
3✔
896

897
// createWalletUnlockerService creates a WalletUnlockerService from the passed
898
// config.
899
func createWalletUnlockerService(cfg *Config) *walletunlocker.UnlockerService {
3✔
900
        // The macaroonFiles are passed to the wallet unlocker so they can be
3✔
901
        // deleted and recreated in case the root macaroon key is also changed
3✔
902
        // during the change password operation.
3✔
903
        macaroonFiles := []string{
3✔
904
                cfg.AdminMacPath, cfg.ReadMacPath, cfg.InvoiceMacPath,
3✔
905
        }
3✔
906

3✔
907
        return walletunlocker.New(
3✔
908
                cfg.ActiveNetParams.Params, macaroonFiles,
3✔
909
                cfg.ResetWalletTransactions, nil,
3✔
910
        )
3✔
911
}
3✔
912

913
// startGrpcListen starts the GRPC server on the passed listeners.
914
func startGrpcListen(cfg *Config, grpcServer *grpc.Server,
915
        listeners []*ListenerWithSignal) error {
3✔
916

3✔
917
        // Use a WaitGroup so we can be sure the instructions on how to input the
3✔
918
        // password is the last thing to be printed to the console.
3✔
919
        var wg sync.WaitGroup
3✔
920

3✔
921
        for _, lis := range listeners {
6✔
922
                wg.Add(1)
3✔
923
                go func(lis *ListenerWithSignal) {
6✔
924
                        rpcsLog.Infof("RPC server listening on %s", lis.Addr())
3✔
925

3✔
926
                        // Close the ready chan to indicate we are listening.
3✔
927
                        close(lis.Ready)
3✔
928

3✔
929
                        wg.Done()
3✔
930
                        _ = grpcServer.Serve(lis)
3✔
931
                }(lis)
3✔
932
        }
933

934
        // If Prometheus monitoring is enabled, start the Prometheus exporter.
935
        if cfg.Prometheus.Enabled() {
3✔
936
                err := monitoring.ExportPrometheusMetrics(
×
937
                        grpcServer, cfg.Prometheus,
×
938
                )
×
939
                if err != nil {
×
940
                        return err
×
941
                }
×
942
        }
943

944
        // Wait for gRPC servers to be up running.
945
        wg.Wait()
3✔
946

3✔
947
        return nil
3✔
948
}
949

950
// startRestProxy starts the given REST proxy on the listeners found in the
951
// config.
952
func startRestProxy(ctx context.Context, cfg *Config, rpcServer *rpcServer,
953
        restDialOpts []grpc.DialOption,
954
        restListen func(net.Addr) (net.Listener, error)) (func(), error) {
3✔
955

3✔
956
        // We use the first RPC listener as the destination for our REST proxy.
3✔
957
        // If the listener is set to listen on all interfaces, we replace it
3✔
958
        // with localhost, as we cannot dial it directly.
3✔
959
        restProxyDest := cfg.RPCListeners[0].String()
3✔
960
        switch {
3✔
961
        case strings.Contains(restProxyDest, "0.0.0.0"):
×
962
                restProxyDest = strings.Replace(
×
963
                        restProxyDest, "0.0.0.0", "127.0.0.1", 1,
×
964
                )
×
965

966
        case strings.Contains(restProxyDest, "[::]"):
×
967
                restProxyDest = strings.Replace(
×
968
                        restProxyDest, "[::]", "[::1]", 1,
×
969
                )
×
970
        }
971

972
        var shutdownFuncs []func()
3✔
973
        shutdown := func() {
6✔
974
                for _, shutdownFn := range shutdownFuncs {
6✔
975
                        shutdownFn()
3✔
976
                }
3✔
977
        }
978

979
        // Start a REST proxy for our gRPC server.
980
        ctx, cancel := context.WithCancel(ctx)
3✔
981
        shutdownFuncs = append(shutdownFuncs, cancel)
3✔
982

3✔
983
        // We'll set up a proxy that will forward REST calls to the GRPC
3✔
984
        // server.
3✔
985
        //
3✔
986
        // The default JSON marshaler of the REST proxy only sets OrigName to
3✔
987
        // true, which instructs it to use the same field names as specified in
3✔
988
        // the proto file and not switch to camel case. What we also want is
3✔
989
        // that the marshaler prints all values, even if they are falsey.
3✔
990
        customMarshalerOption := proxy.WithMarshalerOption(
3✔
991
                proxy.MIMEWildcard, &proxy.JSONPb{
3✔
992
                        MarshalOptions:   *lnrpc.RESTJsonMarshalOpts,
3✔
993
                        UnmarshalOptions: *lnrpc.RESTJsonUnmarshalOpts,
3✔
994
                },
3✔
995
        )
3✔
996
        mux := proxy.NewServeMux(
3✔
997
                customMarshalerOption,
3✔
998

3✔
999
                // Don't allow falling back to other HTTP methods, we want exact
3✔
1000
                // matches only. The actual method to be used can be overwritten
3✔
1001
                // by setting X-HTTP-Method-Override so there should be no
3✔
1002
                // reason for not specifying the correct method in the first
3✔
1003
                // place.
3✔
1004
                proxy.WithDisablePathLengthFallback(),
3✔
1005
        )
3✔
1006

3✔
1007
        // Register our services with the REST proxy.
3✔
1008
        err := rpcServer.RegisterWithRestProxy(
3✔
1009
                ctx, mux, restDialOpts, restProxyDest,
3✔
1010
        )
3✔
1011
        if err != nil {
3✔
1012
                return nil, err
×
1013
        }
×
1014

1015
        // Wrap the default grpc-gateway handler with the WebSocket handler.
1016
        restHandler := lnrpc.NewWebSocketProxy(
3✔
1017
                mux, rpcsLog, cfg.WSPingInterval, cfg.WSPongWait,
3✔
1018
                lnrpc.LndClientStreamingURIs,
3✔
1019
        )
3✔
1020

3✔
1021
        // Use a WaitGroup so we can be sure the instructions on how to input the
3✔
1022
        // password is the last thing to be printed to the console.
3✔
1023
        var wg sync.WaitGroup
3✔
1024

3✔
1025
        // Now spin up a network listener for each requested port and start a
3✔
1026
        // goroutine that serves REST with the created mux there.
3✔
1027
        for _, restEndpoint := range cfg.RESTListeners {
6✔
1028
                lis, err := restListen(restEndpoint)
3✔
1029
                if err != nil {
3✔
1030
                        ltndLog.Errorf("gRPC proxy unable to listen on %s",
×
1031
                                restEndpoint)
×
1032
                        return nil, err
×
1033
                }
×
1034

1035
                shutdownFuncs = append(shutdownFuncs, func() {
6✔
1036
                        err := lis.Close()
3✔
1037
                        if err != nil {
3✔
1038
                                rpcsLog.Errorf("Error closing listener: %v",
×
1039
                                        err)
×
1040
                        }
×
1041
                })
1042

1043
                wg.Add(1)
3✔
1044
                go func() {
6✔
1045
                        rpcsLog.Infof("gRPC proxy started at %s", lis.Addr())
3✔
1046

3✔
1047
                        // Create our proxy chain now. A request will pass
3✔
1048
                        // through the following chain:
3✔
1049
                        // req ---> CORS handler --> WS proxy --->
3✔
1050
                        //   REST proxy --> gRPC endpoint
3✔
1051
                        corsHandler := allowCORS(restHandler, cfg.RestCORS)
3✔
1052

3✔
1053
                        wg.Done()
3✔
1054
                        err := http.Serve(lis, corsHandler)
3✔
1055
                        if err != nil && !lnrpc.IsClosedConnError(err) {
3✔
1056
                                rpcsLog.Error(err)
×
1057
                        }
×
1058
                }()
1059
        }
1060

1061
        // Wait for REST servers to be up running.
1062
        wg.Wait()
3✔
1063

3✔
1064
        return shutdown, nil
3✔
1065
}