• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 14402160929

11 Apr 2025 11:30AM UTC coverage: 69.06%. First build
14402160929

Pull #9704

github

web-flow
Merge 41eee0ca6 into e214b579e
Pull Request #9704: discovery+autopilot: revert passing contexts to `Start` methods

15 of 19 new or added lines in 9 files covered. (78.95%)

133514 of 193331 relevant lines covered (69.06%)

22222.8 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

58.16
/lnd.go
1
// Copyright (c) 2013-2017 The btcsuite developers
2
// Copyright (c) 2015-2016 The Decred developers
3
// Copyright (C) 2015-2022 The Lightning Network Developers
4

5
package lnd
6

7
import (
8
        "context"
9
        "errors"
10
        "fmt"
11
        "log/slog"
12
        "net"
13
        "net/http"
14
        "net/http/pprof"
15
        "os"
16
        "runtime"
17
        runtimePprof "runtime/pprof"
18
        "strings"
19
        "sync"
20
        "time"
21

22
        "github.com/btcsuite/btcd/btcutil"
23
        proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
24
        "github.com/lightningnetwork/lnd/autopilot"
25
        "github.com/lightningnetwork/lnd/build"
26
        "github.com/lightningnetwork/lnd/chanacceptor"
27
        "github.com/lightningnetwork/lnd/channeldb"
28
        "github.com/lightningnetwork/lnd/cluster"
29
        "github.com/lightningnetwork/lnd/keychain"
30
        "github.com/lightningnetwork/lnd/lncfg"
31
        "github.com/lightningnetwork/lnd/lnrpc"
32
        "github.com/lightningnetwork/lnd/lnwallet"
33
        "github.com/lightningnetwork/lnd/macaroons"
34
        "github.com/lightningnetwork/lnd/monitoring"
35
        "github.com/lightningnetwork/lnd/rpcperms"
36
        "github.com/lightningnetwork/lnd/signal"
37
        "github.com/lightningnetwork/lnd/tor"
38
        "github.com/lightningnetwork/lnd/walletunlocker"
39
        "github.com/lightningnetwork/lnd/watchtower"
40
        "google.golang.org/grpc"
41
        "google.golang.org/grpc/credentials"
42
        "google.golang.org/grpc/keepalive"
43
        "gopkg.in/macaroon-bakery.v2/bakery"
44
        "gopkg.in/macaroon.v2"
45
)
46

47
const (
	// adminMacaroonFilePermissions is the file permission that is used for
	// creating the admin macaroon file.
	//
	// Why 640 is safe:
	// Assuming a reasonably secure Linux system, it will have a
	// separate group for each user. E.g. a new user lnd gets assigned group
	// lnd which nothing else belongs to. A system that does not do this is
	// inherently broken already.
	//
	// Since there is no other user in the group, no other user can read
	// admin macaroon unless the administrator explicitly allowed it. Thus
	// there's no harm allowing group read.
	adminMacaroonFilePermissions = 0640

	// leaderResignTimeout is the timeout used when resigning from the
	// leader role. This is kept short so LND can shut down quickly in case
	// of a system failure or network partition making the cluster
	// unresponsive. The cluster itself should ensure that the leader is not
	// elected again until the previous leader has resigned or the leader
	// election timeout has passed.
	leaderResignTimeout = 5 * time.Second
)
70

71
// AdminAuthOptions returns a list of DialOptions that can be used to
72
// authenticate with the RPC server with admin capabilities.
73
// skipMacaroons=true should be set if we don't want to include macaroons with
74
// the auth options. This is needed for instance for the WalletUnlocker
75
// service, which must be usable also before macaroons are created.
76
//
77
// NOTE: This should only be called after the RPCListener has signaled it is
78
// ready.
79
func AdminAuthOptions(cfg *Config, skipMacaroons bool) ([]grpc.DialOption,
80
        error) {
×
81

×
82
        creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "")
×
83
        if err != nil {
×
84
                return nil, fmt.Errorf("unable to read TLS cert: %w", err)
×
85
        }
×
86

87
        // Create a dial options array.
88
        opts := []grpc.DialOption{
×
89
                grpc.WithTransportCredentials(creds),
×
90
        }
×
91

×
92
        // Get the admin macaroon if macaroons are active.
×
93
        if !skipMacaroons && !cfg.NoMacaroons {
×
94
                // Load the admin macaroon file.
×
95
                macBytes, err := os.ReadFile(cfg.AdminMacPath)
×
96
                if err != nil {
×
97
                        return nil, fmt.Errorf("unable to read macaroon "+
×
98
                                "path (check the network setting!): %v", err)
×
99
                }
×
100

101
                mac := &macaroon.Macaroon{}
×
102
                if err = mac.UnmarshalBinary(macBytes); err != nil {
×
103
                        return nil, fmt.Errorf("unable to decode macaroon: %w",
×
104
                                err)
×
105
                }
×
106

107
                // Now we append the macaroon credentials to the dial options.
108
                cred, err := macaroons.NewMacaroonCredential(mac)
×
109
                if err != nil {
×
110
                        return nil, fmt.Errorf("error cloning mac: %w", err)
×
111
                }
×
112
                opts = append(opts, grpc.WithPerRPCCredentials(cred))
×
113
        }
114

115
        return opts, nil
×
116
}
117

118
// ListenerWithSignal is a net.Listener that has an additional Ready channel
// that will be closed when a server starts listening.
type ListenerWithSignal struct {
	// Listener is the embedded listener the server accepts connections on.
	net.Listener

	// Ready will be closed by the server listening on Listener.
	Ready chan struct{}

	// MacChan is an optional way to pass the admin macaroon to the program
	// that started lnd. The channel should be buffered to avoid lnd being
	// blocked on sending to the channel.
	MacChan chan []byte
}
131

132
// ListenerCfg is a wrapper around custom listeners that can be passed to lnd
// when calling its main method.
type ListenerCfg struct {
	// RPCListeners can be set to the listeners to use for the RPC server.
	// If empty a regular network listener will be created.
	RPCListeners []*ListenerWithSignal
}
139

140
// errStreamIsolationWithProxySkip is returned when the mutually exclusive
// Tor options of stream isolation and skipping the proxy for clearnet
// targets are both enabled.
var errStreamIsolationWithProxySkip = errors.New(
	"while stream isolation is enabled, the TOR proxy may not be skipped",
)
143

144
// Main is the true entry point for lnd. It accepts a fully populated and
145
// validated main configuration struct and an optional listener config struct.
146
// This function starts all main system components then blocks until a signal
147
// is received on the shutdownChan at which point everything is shut down again.
148
func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
149
        interceptor signal.Interceptor) error {
3✔
150

3✔
151
        defer func() {
6✔
152
                ltndLog.Info("Shutdown complete")
3✔
153
                err := cfg.LogRotator.Close()
3✔
154
                if err != nil {
3✔
155
                        ltndLog.Errorf("Could not close log rotator: %v", err)
×
156
                }
×
157
        }()
158

159
        ctx, cancel := context.WithCancel(context.Background())
3✔
160
        defer cancel()
3✔
161

3✔
162
        ctx, err := build.WithBuildInfo(ctx, cfg.LogConfig)
3✔
163
        if err != nil {
3✔
164
                return fmt.Errorf("unable to add build info to context: %w",
×
165
                        err)
×
166
        }
×
167

168
        mkErr := func(msg string, err error, attrs ...any) error {
3✔
169
                ltndLog.ErrorS(ctx, "Shutting down due to error in main "+
×
170
                        "method", err, attrs...)
×
171

×
172
                var (
×
173
                        params = []any{err}
×
174
                        fmtStr = msg + ": %w"
×
175
                )
×
176
                for _, attr := range attrs {
×
177
                        fmtStr += " %s"
×
178

×
179
                        params = append(params, attr)
×
180
                }
×
181

182
                return fmt.Errorf(fmtStr, params...)
×
183
        }
184

185
        // Show version at startup.
186
        ltndLog.InfoS(ctx, "Version Info",
3✔
187
                slog.String("version", build.Version()),
3✔
188
                slog.String("commit", build.Commit),
3✔
189
                slog.Any("debuglevel", build.Deployment),
3✔
190
                slog.String("logging", cfg.DebugLevel))
3✔
191

3✔
192
        var network string
3✔
193
        switch {
3✔
194
        case cfg.Bitcoin.TestNet3:
×
195
                network = "testnet"
×
196

197
        case cfg.Bitcoin.TestNet4:
×
198
                network = "testnet4"
×
199

200
        case cfg.Bitcoin.MainNet:
×
201
                network = "mainnet"
×
202

203
        case cfg.Bitcoin.SimNet:
×
204
                network = "simnet"
×
205

206
        case cfg.Bitcoin.RegTest:
3✔
207
                network = "regtest"
3✔
208

209
        case cfg.Bitcoin.SigNet:
×
210
                network = "signet"
×
211
        }
212

213
        ltndLog.InfoS(ctx, "Network Info",
3✔
214
                "active_chain", strings.Title(BitcoinChainName),
3✔
215
                "network", network)
3✔
216

3✔
217
        // Enable http profiling server if requested.
3✔
218
        if cfg.Pprof.Profile != "" {
3✔
219
                // Create the http handler.
×
220
                pprofMux := http.NewServeMux()
×
221
                pprofMux.HandleFunc("/debug/pprof/", pprof.Index)
×
222
                pprofMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
×
223
                pprofMux.HandleFunc("/debug/pprof/profile", pprof.Profile)
×
224
                pprofMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
×
225
                pprofMux.HandleFunc("/debug/pprof/trace", pprof.Trace)
×
226

×
227
                if cfg.Pprof.BlockingProfile != 0 {
×
228
                        runtime.SetBlockProfileRate(cfg.Pprof.BlockingProfile)
×
229
                }
×
230
                if cfg.Pprof.MutexProfile != 0 {
×
231
                        runtime.SetMutexProfileFraction(cfg.Pprof.MutexProfile)
×
232
                }
×
233

234
                // Redirect all requests to the pprof handler, thus visiting
235
                // `127.0.0.1:6060` will be redirected to
236
                // `127.0.0.1:6060/debug/pprof`.
237
                pprofMux.Handle("/", http.RedirectHandler(
×
238
                        "/debug/pprof/", http.StatusSeeOther,
×
239
                ))
×
240

×
241
                ltndLog.InfoS(ctx, "Pprof listening", "addr", cfg.Pprof.Profile)
×
242

×
243
                // Create the pprof server.
×
244
                pprofServer := &http.Server{
×
245
                        Addr:              cfg.Pprof.Profile,
×
246
                        Handler:           pprofMux,
×
247
                        ReadHeaderTimeout: cfg.HTTPHeaderTimeout,
×
248
                }
×
249

×
250
                // Shut the server down when lnd is shutting down.
×
251
                defer func() {
×
252
                        ltndLog.InfoS(ctx, "Stopping pprof server...")
×
253
                        err := pprofServer.Shutdown(ctx)
×
254
                        if err != nil {
×
255
                                ltndLog.ErrorS(ctx, "Stop pprof server", err)
×
256
                        }
×
257
                }()
258

259
                // Start the pprof server.
260
                go func() {
×
261
                        err := pprofServer.ListenAndServe()
×
262
                        if err != nil && !errors.Is(err, http.ErrServerClosed) {
×
263
                                ltndLog.ErrorS(ctx, "Could not serve pprof "+
×
264
                                        "server", err)
×
265
                        }
×
266
                }()
267
        }
268

269
        // Write cpu profile if requested.
270
        if cfg.Pprof.CPUProfile != "" {
3✔
271
                f, err := os.Create(cfg.Pprof.CPUProfile)
×
272
                if err != nil {
×
273
                        return mkErr("unable to create CPU profile", err)
×
274
                }
×
275
                _ = runtimePprof.StartCPUProfile(f)
×
276
                defer func() {
×
277
                        _ = f.Close()
×
278
                }()
×
279
                defer runtimePprof.StopCPUProfile()
×
280
        }
281

282
        // Run configuration dependent DB pre-initialization. Note that this
283
        // needs to be done early and once during the startup process, before
284
        // any DB access.
285
        if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
3✔
286
                return mkErr("error initializing DBs", err)
×
287
        }
×
288

289
        tlsManagerCfg := &TLSManagerCfg{
3✔
290
                TLSCertPath:        cfg.TLSCertPath,
3✔
291
                TLSKeyPath:         cfg.TLSKeyPath,
3✔
292
                TLSEncryptKey:      cfg.TLSEncryptKey,
3✔
293
                TLSExtraIPs:        cfg.TLSExtraIPs,
3✔
294
                TLSExtraDomains:    cfg.TLSExtraDomains,
3✔
295
                TLSAutoRefresh:     cfg.TLSAutoRefresh,
3✔
296
                TLSDisableAutofill: cfg.TLSDisableAutofill,
3✔
297
                TLSCertDuration:    cfg.TLSCertDuration,
3✔
298

3✔
299
                LetsEncryptDir:    cfg.LetsEncryptDir,
3✔
300
                LetsEncryptDomain: cfg.LetsEncryptDomain,
3✔
301
                LetsEncryptListen: cfg.LetsEncryptListen,
3✔
302

3✔
303
                DisableRestTLS: cfg.DisableRestTLS,
3✔
304

3✔
305
                HTTPHeaderTimeout: cfg.HTTPHeaderTimeout,
3✔
306
        }
3✔
307
        tlsManager := NewTLSManager(tlsManagerCfg)
3✔
308
        serverOpts, restDialOpts, restListen, cleanUp,
3✔
309
                err := tlsManager.SetCertificateBeforeUnlock()
3✔
310
        if err != nil {
3✔
311
                return mkErr("error setting cert before unlock", err)
×
312
        }
×
313
        if cleanUp != nil {
6✔
314
                defer cleanUp()
3✔
315
        }
3✔
316

317
        // If we have chosen to start with a dedicated listener for the
318
        // rpc server, we set it directly.
319
        grpcListeners := append([]*ListenerWithSignal{}, lisCfg.RPCListeners...)
3✔
320
        if len(grpcListeners) == 0 {
6✔
321
                // Otherwise we create listeners from the RPCListeners defined
3✔
322
                // in the config.
3✔
323
                for _, grpcEndpoint := range cfg.RPCListeners {
6✔
324
                        // Start a gRPC server listening for HTTP/2
3✔
325
                        // connections.
3✔
326
                        lis, err := lncfg.ListenOnAddress(grpcEndpoint)
3✔
327
                        if err != nil {
3✔
328
                                return mkErr("unable to listen on grpc "+
×
329
                                        "endpoint", err,
×
330
                                        slog.String(
×
331
                                                "endpoint",
×
332
                                                grpcEndpoint.String(),
×
333
                                        ))
×
334
                        }
×
335
                        defer lis.Close()
3✔
336

3✔
337
                        grpcListeners = append(
3✔
338
                                grpcListeners, &ListenerWithSignal{
3✔
339
                                        Listener: lis,
3✔
340
                                        Ready:    make(chan struct{}),
3✔
341
                                },
3✔
342
                        )
3✔
343
                }
344
        }
345

346
        // Create a new RPC interceptor that we'll add to the GRPC server. This
347
        // will be used to log the API calls invoked on the GRPC server.
348
        interceptorChain := rpcperms.NewInterceptorChain(
3✔
349
                rpcsLog, cfg.NoMacaroons, cfg.RPCMiddleware.Mandatory,
3✔
350
        )
3✔
351
        if err := interceptorChain.Start(); err != nil {
3✔
352
                return mkErr("error starting interceptor chain", err)
×
353
        }
×
354
        defer func() {
6✔
355
                err := interceptorChain.Stop()
3✔
356
                if err != nil {
3✔
357
                        ltndLog.Warnf("error stopping RPC interceptor "+
×
358
                                "chain: %v", err)
×
359
                }
×
360
        }()
361

362
        // Allow the user to overwrite some defaults of the gRPC library related
363
        // to connection keepalive (server side and client side pings).
364
        serverKeepalive := keepalive.ServerParameters{
3✔
365
                Time:    cfg.GRPC.ServerPingTime,
3✔
366
                Timeout: cfg.GRPC.ServerPingTimeout,
3✔
367
        }
3✔
368
        clientKeepalive := keepalive.EnforcementPolicy{
3✔
369
                MinTime:             cfg.GRPC.ClientPingMinWait,
3✔
370
                PermitWithoutStream: cfg.GRPC.ClientAllowPingWithoutStream,
3✔
371
        }
3✔
372

3✔
373
        rpcServerOpts := interceptorChain.CreateServerOpts()
3✔
374
        serverOpts = append(serverOpts, rpcServerOpts...)
3✔
375
        serverOpts = append(
3✔
376
                serverOpts, grpc.MaxRecvMsgSize(lnrpc.MaxGrpcMsgSize),
3✔
377
                grpc.KeepaliveParams(serverKeepalive),
3✔
378
                grpc.KeepaliveEnforcementPolicy(clientKeepalive),
3✔
379
        )
3✔
380

3✔
381
        grpcServer := grpc.NewServer(serverOpts...)
3✔
382
        defer grpcServer.Stop()
3✔
383

3✔
384
        // We'll also register the RPC interceptor chain as the StateServer, as
3✔
385
        // it can be used to query for the current state of the wallet.
3✔
386
        lnrpc.RegisterStateServer(grpcServer, interceptorChain)
3✔
387

3✔
388
        // Initialize, and register our implementation of the gRPC interface
3✔
389
        // exported by the rpcServer.
3✔
390
        rpcServer := newRPCServer(cfg, interceptorChain, implCfg, interceptor)
3✔
391
        err = rpcServer.RegisterWithGrpcServer(grpcServer)
3✔
392
        if err != nil {
3✔
393
                return mkErr("error registering gRPC server", err)
×
394
        }
×
395

396
        // Now that both the WalletUnlocker and LightningService have been
397
        // registered with the GRPC server, we can start listening.
398
        err = startGrpcListen(cfg, grpcServer, grpcListeners)
3✔
399
        if err != nil {
3✔
400
                return mkErr("error starting gRPC listener", err)
×
401
        }
×
402

403
        // Now start the REST proxy for our gRPC server above. We'll ensure
404
        // we direct LND to connect to its loopback address rather than a
405
        // wildcard to prevent certificate issues when accessing the proxy
406
        // externally.
407
        stopProxy, err := startRestProxy(
3✔
408
                ctx, cfg, rpcServer, restDialOpts, restListen,
3✔
409
        )
3✔
410
        if err != nil {
3✔
411
                return mkErr("error starting REST proxy", err)
×
412
        }
×
413
        defer stopProxy()
3✔
414

3✔
415
        // Start leader election if we're running on etcd. Continuation will be
3✔
416
        // blocked until this instance is elected as the current leader or
3✔
417
        // shutting down.
3✔
418
        elected := false
3✔
419
        var leaderElector cluster.LeaderElector
3✔
420
        if cfg.Cluster.EnableLeaderElection {
3✔
421
                electionCtx, cancelElection := context.WithCancel(ctx)
×
422

×
423
                go func() {
×
424
                        <-interceptor.ShutdownChannel()
×
425
                        cancelElection()
×
426
                }()
×
427

428
                ltndLog.InfoS(ctx, "Using leader elector",
×
429
                        "elector", cfg.Cluster.LeaderElector)
×
430

×
431
                leaderElector, err = cfg.Cluster.MakeLeaderElector(
×
432
                        electionCtx, cfg.DB,
×
433
                )
×
434
                if err != nil {
×
435
                        return err
×
436
                }
×
437

438
                defer func() {
×
439
                        if !elected {
×
440
                                return
×
441
                        }
×
442

443
                        ltndLog.InfoS(ctx, "Attempting to resign from "+
×
444
                                "leader role", "cluster_id", cfg.Cluster.ID)
×
445

×
446
                        // Ensure that we don't block the shutdown process if
×
447
                        // the leader resigning process takes too long. The
×
448
                        // cluster will ensure that the leader is not elected
×
449
                        // again until the previous leader has resigned or the
×
450
                        // leader election timeout has passed.
×
451
                        timeoutCtx, cancel := context.WithTimeout(
×
452
                                ctx, leaderResignTimeout,
×
453
                        )
×
454
                        defer cancel()
×
455

×
456
                        if err := leaderElector.Resign(timeoutCtx); err != nil {
×
457
                                ltndLog.Errorf("Leader elector failed to "+
×
458
                                        "resign: %v", err)
×
459
                        }
×
460
                }()
461

462
                ltndLog.InfoS(ctx, "Starting leadership campaign",
×
463
                        "cluster_id", cfg.Cluster.ID)
×
464

×
465
                if err := leaderElector.Campaign(electionCtx); err != nil {
×
466
                        return mkErr("leadership campaign failed", err)
×
467
                }
×
468

469
                elected = true
×
470
                ltndLog.InfoS(ctx, "Elected as leader",
×
471
                        "cluster_id", cfg.Cluster.ID)
×
472
        }
473

474
        dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
3✔
475
        switch {
3✔
476
        case errors.Is(err, channeldb.ErrDryRunMigrationOK):
×
477
                ltndLog.InfoS(ctx, "Exiting due to BuildDatabase error",
×
478
                        slog.Any("err", err))
×
479
                return nil
×
480
        case err != nil:
×
481
                return mkErr("unable to open databases", err)
×
482
        }
483

484
        defer cleanUp()
3✔
485

3✔
486
        partialChainControl, walletConfig, cleanUp, err := implCfg.BuildWalletConfig(
3✔
487
                ctx, dbs, &implCfg.AuxComponents, interceptorChain,
3✔
488
                grpcListeners,
3✔
489
        )
3✔
490
        if err != nil {
3✔
491
                return mkErr("error creating wallet config", err)
×
492
        }
×
493

494
        defer cleanUp()
3✔
495

3✔
496
        activeChainControl, cleanUp, err := implCfg.BuildChainControl(
3✔
497
                partialChainControl, walletConfig,
3✔
498
        )
3✔
499
        if err != nil {
3✔
500
                return mkErr("error loading chain control", err)
×
501
        }
×
502

503
        defer cleanUp()
3✔
504

3✔
505
        // TODO(roasbeef): add rotation
3✔
506
        idKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
3✔
507
                keychain.KeyLocator{
3✔
508
                        Family: keychain.KeyFamilyNodeKey,
3✔
509
                        Index:  0,
3✔
510
                },
3✔
511
        )
3✔
512
        if err != nil {
3✔
513
                return mkErr("error deriving node key", err)
×
514
        }
×
515

516
        if cfg.Tor.StreamIsolation && cfg.Tor.SkipProxyForClearNetTargets {
3✔
517
                return errStreamIsolationWithProxySkip
×
518
        }
×
519

520
        if cfg.Tor.Active {
3✔
521
                if cfg.Tor.SkipProxyForClearNetTargets {
×
522
                        srvrLog.InfoS(ctx, "Onion services are accessible "+
×
523
                                "via Tor! NOTE: Traffic to clearnet services "+
×
524
                                "is not routed via Tor.")
×
525
                } else {
×
526
                        srvrLog.InfoS(ctx, "Proxying all network traffic "+
×
527
                                "via Tor! NOTE: Ensure the backend node is "+
×
528
                                "proxying over Tor as well",
×
529
                                "stream_isolation", cfg.Tor.StreamIsolation)
×
530
                }
×
531
        }
532

533
        // If tor is active and either v2 or v3 onion services have been
534
        // specified, make a tor controller and pass it into both the watchtower
535
        // server and the regular lnd server.
536
        var torController *tor.Controller
3✔
537
        if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) {
3✔
538
                torController = tor.NewController(
×
539
                        cfg.Tor.Control, cfg.Tor.TargetIPAddress,
×
540
                        cfg.Tor.Password,
×
541
                )
×
542

×
543
                // Start the tor controller before giving it to any other
×
544
                // subsystems.
×
545
                if err := torController.Start(); err != nil {
×
546
                        return mkErr("unable to initialize tor controller",
×
547
                                err)
×
548
                }
×
549
                defer func() {
×
550
                        if err := torController.Stop(); err != nil {
×
551
                                ltndLog.ErrorS(ctx, "Error stopping tor "+
×
552
                                        "controller", err)
×
553
                        }
×
554
                }()
555
        }
556

557
        var tower *watchtower.Standalone
3✔
558
        if cfg.Watchtower.Active {
6✔
559
                towerKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
3✔
560
                        keychain.KeyLocator{
3✔
561
                                Family: keychain.KeyFamilyTowerID,
3✔
562
                                Index:  0,
3✔
563
                        },
3✔
564
                )
3✔
565
                if err != nil {
3✔
566
                        return mkErr("error deriving tower key", err)
×
567
                }
×
568

569
                wtCfg := &watchtower.Config{
3✔
570
                        BlockFetcher:   activeChainControl.ChainIO,
3✔
571
                        DB:             dbs.TowerServerDB,
3✔
572
                        EpochRegistrar: activeChainControl.ChainNotifier,
3✔
573
                        Net:            cfg.net,
3✔
574
                        NewAddress: func() (btcutil.Address, error) {
3✔
575
                                return activeChainControl.Wallet.NewAddress(
×
576
                                        lnwallet.TaprootPubkey, false,
×
577
                                        lnwallet.DefaultAccountName,
×
578
                                )
×
579
                        },
×
580
                        NodeKeyECDH: keychain.NewPubKeyECDH(
581
                                towerKeyDesc, activeChainControl.KeyRing,
582
                        ),
583
                        PublishTx: activeChainControl.Wallet.PublishTransaction,
584
                        ChainHash: *cfg.ActiveNetParams.GenesisHash,
585
                }
586

587
                // If there is a tor controller (user wants auto hidden
588
                // services), then store a pointer in the watchtower config.
589
                if torController != nil {
3✔
590
                        wtCfg.TorController = torController
×
591
                        wtCfg.WatchtowerKeyPath = cfg.Tor.WatchtowerKeyPath
×
592
                        wtCfg.EncryptKey = cfg.Tor.EncryptKey
×
593
                        wtCfg.KeyRing = activeChainControl.KeyRing
×
594

×
595
                        switch {
×
596
                        case cfg.Tor.V2:
×
597
                                wtCfg.Type = tor.V2
×
598
                        case cfg.Tor.V3:
×
599
                                wtCfg.Type = tor.V3
×
600
                        }
601
                }
602

603
                wtConfig, err := cfg.Watchtower.Apply(
3✔
604
                        wtCfg, lncfg.NormalizeAddresses,
3✔
605
                )
3✔
606
                if err != nil {
3✔
607
                        return mkErr("unable to configure watchtower", err)
×
608
                }
×
609

610
                tower, err = watchtower.New(wtConfig)
3✔
611
                if err != nil {
3✔
612
                        return mkErr("unable to create watchtower", err)
×
613
                }
×
614
        }
615

616
        // Initialize the MultiplexAcceptor. If lnd was started with the
617
        // zero-conf feature bit, then this will be a ZeroConfAcceptor.
618
        // Otherwise, this will be a ChainedAcceptor.
619
        var multiAcceptor chanacceptor.MultiplexAcceptor
3✔
620
        if cfg.ProtocolOptions.ZeroConf() {
6✔
621
                multiAcceptor = chanacceptor.NewZeroConfAcceptor()
3✔
622
        } else {
6✔
623
                multiAcceptor = chanacceptor.NewChainedAcceptor()
3✔
624
        }
3✔
625

626
        // Set up the core server which will listen for incoming peer
627
        // connections.
628
        server, err := newServer(
3✔
629
                ctx, cfg, cfg.Listeners, dbs, activeChainControl, &idKeyDesc,
3✔
630
                activeChainControl.Cfg.WalletUnlockParams.ChansToRestore,
3✔
631
                multiAcceptor, torController, tlsManager, leaderElector,
3✔
632
                implCfg,
3✔
633
        )
3✔
634
        if err != nil {
3✔
635
                return mkErr("unable to create server", err)
×
636
        }
×
637

638
        // Set up an autopilot manager from the current config. This will be
639
        // used to manage the underlying autopilot agent, starting and stopping
640
        // it at will.
641
        atplCfg, err := initAutoPilot(
3✔
642
                server, cfg.Autopilot, activeChainControl.MinHtlcIn,
3✔
643
                cfg.ActiveNetParams,
3✔
644
        )
3✔
645
        if err != nil {
3✔
646
                return mkErr("unable to initialize autopilot", err)
×
647
        }
×
648

649
        atplManager, err := autopilot.NewManager(atplCfg)
3✔
650
        if err != nil {
3✔
651
                return mkErr("unable to create autopilot manager", err)
×
652
        }
×
653
        if err := atplManager.Start(); err != nil {
3✔
654
                return mkErr("unable to start autopilot manager", err)
×
655
        }
×
656
        defer atplManager.Stop()
3✔
657

3✔
658
        err = tlsManager.LoadPermanentCertificate(activeChainControl.KeyRing)
3✔
659
        if err != nil {
3✔
660
                return mkErr("unable to load permanent TLS certificate", err)
×
661
        }
×
662

663
        // Now we have created all dependencies necessary to populate and
664
        // start the RPC server.
665
        err = rpcServer.addDeps(
3✔
666
                server, interceptorChain.MacaroonService(), cfg.SubRPCServers,
3✔
667
                atplManager, server.invoices, tower, multiAcceptor,
3✔
668
                server.invoiceHtlcModifier,
3✔
669
        )
3✔
670
        if err != nil {
3✔
671
                return mkErr("unable to add deps to RPC server", err)
×
672
        }
×
673
        if err := rpcServer.Start(); err != nil {
3✔
674
                return mkErr("unable to start RPC server", err)
×
675
        }
×
676
        defer rpcServer.Stop()
3✔
677

3✔
678
        // We transition the RPC state to Active, as the RPC server is up.
3✔
679
        interceptorChain.SetRPCActive()
3✔
680

3✔
681
        if err := interceptor.Notifier.NotifyReady(true); err != nil {
3✔
682
                return mkErr("error notifying ready", err)
×
683
        }
×
684

685
        // We'll wait until we're fully synced to continue the start up of the
686
        // remainder of the daemon. This ensures that we don't accept any
687
        // possibly invalid state transitions, or accept channels with spent
688
        // funds.
689
        _, bestHeight, err := activeChainControl.ChainIO.GetBestBlock()
3✔
690
        if err != nil {
3✔
691
                return mkErr("unable to determine chain tip", err)
×
692
        }
×
693

694
        ltndLog.InfoS(ctx, "Waiting for chain backend to finish sync",
3✔
695
                slog.Int64("start_height", int64(bestHeight)))
3✔
696

3✔
697
        type syncResult struct {
3✔
698
                synced        bool
3✔
699
                bestBlockTime int64
3✔
700
                err           error
3✔
701
        }
3✔
702

3✔
703
        var syncedResChan = make(chan syncResult, 1)
3✔
704

3✔
705
        for {
6✔
706
                // We check if the wallet is synced in a separate goroutine as
3✔
707
                // the call is blocking, and we want to be able to interrupt it
3✔
708
                // if the daemon is shutting down.
3✔
709
                go func() {
6✔
710
                        synced, bestBlockTime, err := activeChainControl.Wallet.
3✔
711
                                IsSynced()
3✔
712
                        syncedResChan <- syncResult{synced, bestBlockTime, err}
3✔
713
                }()
3✔
714

715
                select {
3✔
716
                case <-interceptor.ShutdownChannel():
×
717
                        return nil
×
718

719
                case res := <-syncedResChan:
3✔
720
                        if res.err != nil {
3✔
721
                                return mkErr("unable to determine if wallet "+
×
722
                                        "is synced", res.err)
×
723
                        }
×
724

725
                        ltndLog.DebugS(ctx, "Syncing to block chain",
3✔
726
                                "best_block_time", time.Unix(res.bestBlockTime, 0),
3✔
727
                                "is_synced", res.synced)
3✔
728

3✔
729
                        if res.synced {
6✔
730
                                break
3✔
731
                        }
732

733
                        // If we're not yet synced, we'll wait for a second
734
                        // before checking again.
735
                        select {
3✔
736
                        case <-interceptor.ShutdownChannel():
×
737
                                return nil
×
738

739
                        case <-time.After(time.Second):
3✔
740
                                continue
3✔
741
                        }
742
                }
743

744
                break
3✔
745
        }
746

747
        _, bestHeight, err = activeChainControl.ChainIO.GetBestBlock()
3✔
748
        if err != nil {
3✔
749
                return mkErr("unable to determine chain tip", err)
×
750
        }
×
751

752
        ltndLog.InfoS(ctx, "Chain backend is fully synced!",
3✔
753
                "end_height", bestHeight)
3✔
754

3✔
755
        // With all the relevant chains initialized, we can finally start the
3✔
756
        // server itself. We start the server in an asynchronous goroutine so
3✔
757
        // that we are able to interrupt and shutdown the daemon gracefully in
3✔
758
        // case the startup of the subservers do not behave as expected.
3✔
759
        errChan := make(chan error)
3✔
760
        go func() {
6✔
761
                errChan <- server.Start(ctx)
3✔
762
        }()
3✔
763

764
        defer func() {
6✔
765
                err := server.Stop()
3✔
766
                if err != nil {
3✔
767
                        ltndLog.WarnS(ctx, "Stopping the server including all "+
×
768
                                "its subsystems failed with", err)
×
769
                }
×
770
        }()
771

772
        select {
3✔
773
        case err := <-errChan:
3✔
774
                if err == nil {
6✔
775
                        break
3✔
776
                }
777

778
                return mkErr("unable to start server", err)
×
779

780
        case <-interceptor.ShutdownChannel():
×
781
                return nil
×
782
        }
783

784
        // We transition the server state to Active, as the server is up.
785
        interceptorChain.SetServerActive()
3✔
786

3✔
787
        // Now that the server has started, if the autopilot mode is currently
3✔
788
        // active, then we'll start the autopilot agent immediately. It will be
3✔
789
        // stopped together with the autopilot service.
3✔
790
        if cfg.Autopilot.Active {
3✔
NEW
791
                if err := atplManager.StartAgent(); err != nil {
×
792
                        return mkErr("unable to start autopilot agent", err)
×
793
                }
×
794
        }
795

796
        if cfg.Watchtower.Active {
6✔
797
                if err := tower.Start(); err != nil {
3✔
798
                        return mkErr("unable to start watchtower", err)
×
799
                }
×
800
                defer tower.Stop()
3✔
801
        }
802

803
        // Wait for shutdown signal from either a graceful server stop or from
804
        // the interrupt handler.
805
        <-interceptor.ShutdownChannel()
3✔
806
        return nil
3✔
807
}
808

809
// bakeMacaroon creates a new macaroon with newest version and the given
810
// permissions then returns it binary serialized.
811
func bakeMacaroon(ctx context.Context, svc *macaroons.Service,
812
        permissions []bakery.Op) ([]byte, error) {
3✔
813

3✔
814
        mac, err := svc.NewMacaroon(
3✔
815
                ctx, macaroons.DefaultRootKeyID, permissions...,
3✔
816
        )
3✔
817
        if err != nil {
3✔
818
                return nil, err
×
819
        }
×
820

821
        return mac.M().MarshalBinary()
3✔
822
}
823

824
// saveMacaroon bakes a macaroon with the specified macaroon permissions and
825
// writes it to a file with the given filename and file permissions.
826
func saveMacaroon(ctx context.Context, svc *macaroons.Service, filename string,
827
        macaroonPermissions []bakery.Op, filePermissions os.FileMode) error {
3✔
828

3✔
829
        macaroonBytes, err := bakeMacaroon(ctx, svc, macaroonPermissions)
3✔
830
        if err != nil {
3✔
831
                return err
×
832
        }
×
833
        err = os.WriteFile(filename, macaroonBytes, filePermissions)
3✔
834
        if err != nil {
3✔
835
                _ = os.Remove(filename)
×
836
                return err
×
837
        }
×
838

839
        return nil
3✔
840
}
841

842
// genDefaultMacaroons checks for three default macaroon files and generates
843
// them if they do not exist; one admin-level, one for invoice access and one
844
// read-only. Each macaroon is checked and created independently to ensure all
845
// three exist. The admin macaroon can also be used to generate more granular
846
// macaroons.
847
func genDefaultMacaroons(ctx context.Context, svc *macaroons.Service,
848
        admFile, roFile, invoiceFile string) error {
3✔
849

3✔
850
        // First, we'll generate a macaroon that only allows the caller to
3✔
851
        // access invoice related calls. This is useful for merchants and other
3✔
852
        // services to allow an isolated instance that can only query and
3✔
853
        // modify invoices.
3✔
854
        if !lnrpc.FileExists(invoiceFile) {
6✔
855
                err := saveMacaroon(
3✔
856
                        ctx, svc, invoiceFile, invoicePermissions, 0644,
3✔
857
                )
3✔
858
                if err != nil {
3✔
859
                        return err
×
860
                }
×
861
        }
862

863
        // Generate the read-only macaroon and write it to a file.
864
        if !lnrpc.FileExists(roFile) {
6✔
865
                err := saveMacaroon(
3✔
866
                        ctx, svc, roFile, readPermissions, 0644,
3✔
867
                )
3✔
868
                if err != nil {
3✔
869
                        return err
×
870
                }
×
871
        }
872

873
        // Generate the admin macaroon and write it to a file.
874
        if !lnrpc.FileExists(admFile) {
6✔
875
                err := saveMacaroon(
3✔
876
                        ctx, svc, admFile, adminPermissions(),
3✔
877
                        adminMacaroonFilePermissions,
3✔
878
                )
3✔
879
                if err != nil {
3✔
880
                        return err
×
881
                }
×
882
        }
883

884
        return nil
3✔
885
}
886

887
// adminPermissions returns a list of all permissions in a safe way that doesn't
888
// modify any of the source lists.
889
func adminPermissions() []bakery.Op {
3✔
890
        admin := make([]bakery.Op, len(readPermissions)+len(writePermissions))
3✔
891
        copy(admin[:len(readPermissions)], readPermissions)
3✔
892
        copy(admin[len(readPermissions):], writePermissions)
3✔
893
        return admin
3✔
894
}
3✔
895

896
// createWalletUnlockerService creates a WalletUnlockerService from the passed
897
// config.
898
func createWalletUnlockerService(cfg *Config) *walletunlocker.UnlockerService {
3✔
899
        // The macaroonFiles are passed to the wallet unlocker so they can be
3✔
900
        // deleted and recreated in case the root macaroon key is also changed
3✔
901
        // during the change password operation.
3✔
902
        macaroonFiles := []string{
3✔
903
                cfg.AdminMacPath, cfg.ReadMacPath, cfg.InvoiceMacPath,
3✔
904
        }
3✔
905

3✔
906
        return walletunlocker.New(
3✔
907
                cfg.ActiveNetParams.Params, macaroonFiles,
3✔
908
                cfg.ResetWalletTransactions, nil,
3✔
909
        )
3✔
910
}
3✔
911

912
// startGrpcListen starts the GRPC server on the passed listeners.
913
func startGrpcListen(cfg *Config, grpcServer *grpc.Server,
914
        listeners []*ListenerWithSignal) error {
3✔
915

3✔
916
        // Use a WaitGroup so we can be sure the instructions on how to input the
3✔
917
        // password is the last thing to be printed to the console.
3✔
918
        var wg sync.WaitGroup
3✔
919

3✔
920
        for _, lis := range listeners {
6✔
921
                wg.Add(1)
3✔
922
                go func(lis *ListenerWithSignal) {
6✔
923
                        rpcsLog.Infof("RPC server listening on %s", lis.Addr())
3✔
924

3✔
925
                        // Close the ready chan to indicate we are listening.
3✔
926
                        close(lis.Ready)
3✔
927

3✔
928
                        wg.Done()
3✔
929
                        _ = grpcServer.Serve(lis)
3✔
930
                }(lis)
3✔
931
        }
932

933
        // If Prometheus monitoring is enabled, start the Prometheus exporter.
934
        if cfg.Prometheus.Enabled() {
3✔
935
                err := monitoring.ExportPrometheusMetrics(
×
936
                        grpcServer, cfg.Prometheus,
×
937
                )
×
938
                if err != nil {
×
939
                        return err
×
940
                }
×
941
        }
942

943
        // Wait for gRPC servers to be up running.
944
        wg.Wait()
3✔
945

3✔
946
        return nil
3✔
947
}
948

949
// startRestProxy starts the given REST proxy on the listeners found in the
950
// config.
951
func startRestProxy(ctx context.Context, cfg *Config, rpcServer *rpcServer,
952
        restDialOpts []grpc.DialOption,
953
        restListen func(net.Addr) (net.Listener, error)) (func(), error) {
3✔
954

3✔
955
        // We use the first RPC listener as the destination for our REST proxy.
3✔
956
        // If the listener is set to listen on all interfaces, we replace it
3✔
957
        // with localhost, as we cannot dial it directly.
3✔
958
        restProxyDest := cfg.RPCListeners[0].String()
3✔
959
        switch {
3✔
960
        case strings.Contains(restProxyDest, "0.0.0.0"):
×
961
                restProxyDest = strings.Replace(
×
962
                        restProxyDest, "0.0.0.0", "127.0.0.1", 1,
×
963
                )
×
964

965
        case strings.Contains(restProxyDest, "[::]"):
×
966
                restProxyDest = strings.Replace(
×
967
                        restProxyDest, "[::]", "[::1]", 1,
×
968
                )
×
969
        }
970

971
        var shutdownFuncs []func()
3✔
972
        shutdown := func() {
6✔
973
                for _, shutdownFn := range shutdownFuncs {
6✔
974
                        shutdownFn()
3✔
975
                }
3✔
976
        }
977

978
        // Start a REST proxy for our gRPC server.
979
        ctx, cancel := context.WithCancel(ctx)
3✔
980
        shutdownFuncs = append(shutdownFuncs, cancel)
3✔
981

3✔
982
        // We'll set up a proxy that will forward REST calls to the GRPC
3✔
983
        // server.
3✔
984
        //
3✔
985
        // The default JSON marshaler of the REST proxy only sets OrigName to
3✔
986
        // true, which instructs it to use the same field names as specified in
3✔
987
        // the proto file and not switch to camel case. What we also want is
3✔
988
        // that the marshaler prints all values, even if they are falsey.
3✔
989
        customMarshalerOption := proxy.WithMarshalerOption(
3✔
990
                proxy.MIMEWildcard, &proxy.JSONPb{
3✔
991
                        MarshalOptions:   *lnrpc.RESTJsonMarshalOpts,
3✔
992
                        UnmarshalOptions: *lnrpc.RESTJsonUnmarshalOpts,
3✔
993
                },
3✔
994
        )
3✔
995
        mux := proxy.NewServeMux(
3✔
996
                customMarshalerOption,
3✔
997

3✔
998
                // Don't allow falling back to other HTTP methods, we want exact
3✔
999
                // matches only. The actual method to be used can be overwritten
3✔
1000
                // by setting X-HTTP-Method-Override so there should be no
3✔
1001
                // reason for not specifying the correct method in the first
3✔
1002
                // place.
3✔
1003
                proxy.WithDisablePathLengthFallback(),
3✔
1004
        )
3✔
1005

3✔
1006
        // Register our services with the REST proxy.
3✔
1007
        err := rpcServer.RegisterWithRestProxy(
3✔
1008
                ctx, mux, restDialOpts, restProxyDest,
3✔
1009
        )
3✔
1010
        if err != nil {
3✔
1011
                return nil, err
×
1012
        }
×
1013

1014
        // Wrap the default grpc-gateway handler with the WebSocket handler.
1015
        restHandler := lnrpc.NewWebSocketProxy(
3✔
1016
                mux, rpcsLog, cfg.WSPingInterval, cfg.WSPongWait,
3✔
1017
                lnrpc.LndClientStreamingURIs,
3✔
1018
        )
3✔
1019

3✔
1020
        // Use a WaitGroup so we can be sure the instructions on how to input the
3✔
1021
        // password is the last thing to be printed to the console.
3✔
1022
        var wg sync.WaitGroup
3✔
1023

3✔
1024
        // Now spin up a network listener for each requested port and start a
3✔
1025
        // goroutine that serves REST with the created mux there.
3✔
1026
        for _, restEndpoint := range cfg.RESTListeners {
6✔
1027
                lis, err := restListen(restEndpoint)
3✔
1028
                if err != nil {
3✔
1029
                        ltndLog.Errorf("gRPC proxy unable to listen on %s",
×
1030
                                restEndpoint)
×
1031
                        return nil, err
×
1032
                }
×
1033

1034
                shutdownFuncs = append(shutdownFuncs, func() {
6✔
1035
                        err := lis.Close()
3✔
1036
                        if err != nil {
3✔
1037
                                rpcsLog.Errorf("Error closing listener: %v",
×
1038
                                        err)
×
1039
                        }
×
1040
                })
1041

1042
                wg.Add(1)
3✔
1043
                go func() {
6✔
1044
                        rpcsLog.Infof("gRPC proxy started at %s", lis.Addr())
3✔
1045

3✔
1046
                        // Create our proxy chain now. A request will pass
3✔
1047
                        // through the following chain:
3✔
1048
                        // req ---> CORS handler --> WS proxy --->
3✔
1049
                        //   REST proxy --> gRPC endpoint
3✔
1050
                        corsHandler := allowCORS(restHandler, cfg.RestCORS)
3✔
1051

3✔
1052
                        wg.Done()
3✔
1053
                        err := http.Serve(lis, corsHandler)
3✔
1054
                        if err != nil && !lnrpc.IsClosedConnError(err) {
3✔
1055
                                rpcsLog.Error(err)
×
1056
                        }
×
1057
                }()
1058
        }
1059

1060
        // Wait for REST servers to be up running.
1061
        wg.Wait()
3✔
1062

3✔
1063
        return shutdown, nil
3✔
1064
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc