• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 14358372723

09 Apr 2025 01:26PM UTC coverage: 56.696% (-12.3%) from 69.037%
14358372723

Pull #9696

github

web-flow
Merge e2837e400 into 867d27d68
Pull Request #9696: Add `development_guidelines.md` for both human and machine

107055 of 188823 relevant lines covered (56.7%)

22721.56 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/lnd.go
1
// Copyright (c) 2013-2017 The btcsuite developers
2
// Copyright (c) 2015-2016 The Decred developers
3
// Copyright (C) 2015-2022 The Lightning Network Developers
4

5
package lnd
6

7
import (
8
        "context"
9
        "errors"
10
        "fmt"
11
        "log/slog"
12
        "net"
13
        "net/http"
14
        "net/http/pprof"
15
        "os"
16
        "runtime"
17
        runtimePprof "runtime/pprof"
18
        "strings"
19
        "sync"
20
        "time"
21

22
        "github.com/btcsuite/btcd/btcutil"
23
        proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
24
        "github.com/lightningnetwork/lnd/autopilot"
25
        "github.com/lightningnetwork/lnd/build"
26
        "github.com/lightningnetwork/lnd/chanacceptor"
27
        "github.com/lightningnetwork/lnd/channeldb"
28
        "github.com/lightningnetwork/lnd/cluster"
29
        "github.com/lightningnetwork/lnd/keychain"
30
        "github.com/lightningnetwork/lnd/lncfg"
31
        "github.com/lightningnetwork/lnd/lnrpc"
32
        "github.com/lightningnetwork/lnd/lnwallet"
33
        "github.com/lightningnetwork/lnd/macaroons"
34
        "github.com/lightningnetwork/lnd/monitoring"
35
        "github.com/lightningnetwork/lnd/rpcperms"
36
        "github.com/lightningnetwork/lnd/signal"
37
        "github.com/lightningnetwork/lnd/tor"
38
        "github.com/lightningnetwork/lnd/walletunlocker"
39
        "github.com/lightningnetwork/lnd/watchtower"
40
        "google.golang.org/grpc"
41
        "google.golang.org/grpc/credentials"
42
        "google.golang.org/grpc/keepalive"
43
        "gopkg.in/macaroon-bakery.v2/bakery"
44
        "gopkg.in/macaroon.v2"
45
)
46

47
const (
	// adminMacaroonFilePermissions is the file permission that is used for
	// creating the admin macaroon file.
	//
	// Why 640 is safe:
	// Assuming a reasonably secure Linux system, it will have a
	// separate group for each user. E.g. a new user lnd gets assigned group
	// lnd which nothing else belongs to. A system that does not do this is
	// inherently broken already.
	//
	// Since there is no other user in the group, no other user can read
	// admin macaroon unless the administrator explicitly allowed it. Thus
	// there's no harm allowing group read.
	adminMacaroonFilePermissions = 0640

	// leaderResignTimeout is the timeout used when resigning from the
	// leader role. This is kept short so LND can shut down quickly in case
	// of a system failure or network partition making the cluster
	// unresponsive. The cluster itself should ensure that the leader is not
	// elected again until the previous leader has resigned or the leader
	// election timeout has passed.
	leaderResignTimeout = 5 * time.Second
)
70

71
// AdminAuthOptions returns a list of DialOptions that can be used to
72
// authenticate with the RPC server with admin capabilities.
73
// skipMacaroons=true should be set if we don't want to include macaroons with
74
// the auth options. This is needed for instance for the WalletUnlocker
75
// service, which must be usable also before macaroons are created.
76
//
77
// NOTE: This should only be called after the RPCListener has signaled it is
78
// ready.
79
func AdminAuthOptions(cfg *Config, skipMacaroons bool) ([]grpc.DialOption,
80
        error) {
×
81

×
82
        creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "")
×
83
        if err != nil {
×
84
                return nil, fmt.Errorf("unable to read TLS cert: %w", err)
×
85
        }
×
86

87
        // Create a dial options array.
88
        opts := []grpc.DialOption{
×
89
                grpc.WithTransportCredentials(creds),
×
90
        }
×
91

×
92
        // Get the admin macaroon if macaroons are active.
×
93
        if !skipMacaroons && !cfg.NoMacaroons {
×
94
                // Load the admin macaroon file.
×
95
                macBytes, err := os.ReadFile(cfg.AdminMacPath)
×
96
                if err != nil {
×
97
                        return nil, fmt.Errorf("unable to read macaroon "+
×
98
                                "path (check the network setting!): %v", err)
×
99
                }
×
100

101
                mac := &macaroon.Macaroon{}
×
102
                if err = mac.UnmarshalBinary(macBytes); err != nil {
×
103
                        return nil, fmt.Errorf("unable to decode macaroon: %w",
×
104
                                err)
×
105
                }
×
106

107
                // Now we append the macaroon credentials to the dial options.
108
                cred, err := macaroons.NewMacaroonCredential(mac)
×
109
                if err != nil {
×
110
                        return nil, fmt.Errorf("error cloning mac: %w", err)
×
111
                }
×
112
                opts = append(opts, grpc.WithPerRPCCredentials(cred))
×
113
        }
114

115
        return opts, nil
×
116
}
117

118
// ListenerWithSignal is a net.Listener that has an additional Ready channel
// that will be closed when a server starts listening.
type ListenerWithSignal struct {
	// Listener is the underlying network listener; it is embedded so a
	// ListenerWithSignal can be used anywhere a net.Listener is expected.
	net.Listener

	// Ready will be closed by the server listening on Listener.
	Ready chan struct{}

	// MacChan is an optional way to pass the admin macaroon to the program
	// that started lnd. The channel should be buffered to avoid lnd being
	// blocked on sending to the channel.
	MacChan chan []byte
}
131

132
// ListenerCfg is a wrapper around custom listeners that can be passed to lnd
// when calling its main method.
type ListenerCfg struct {
	// RPCListeners can be set to the listeners to use for the RPC server.
	// If empty a regular network listener will be created.
	RPCListeners []*ListenerWithSignal
}
139

140
var errStreamIsolationWithProxySkip = errors.New(
141
        "while stream isolation is enabled, the TOR proxy may not be skipped",
142
)
143

144
// Main is the true entry point for lnd. It accepts a fully populated and
145
// validated main configuration struct and an optional listener config struct.
146
// This function starts all main system components then blocks until a signal
147
// is received on the shutdownChan at which point everything is shut down again.
148
func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
149
        interceptor signal.Interceptor) error {
×
150

×
151
        defer func() {
×
152
                ltndLog.Info("Shutdown complete")
×
153
                err := cfg.LogRotator.Close()
×
154
                if err != nil {
×
155
                        ltndLog.Errorf("Could not close log rotator: %v", err)
×
156
                }
×
157
        }()
158

159
        ctx, cancel := context.WithCancel(context.Background())
×
160
        defer cancel()
×
161

×
162
        ctx, err := build.WithBuildInfo(ctx, cfg.LogConfig)
×
163
        if err != nil {
×
164
                return fmt.Errorf("unable to add build info to context: %w",
×
165
                        err)
×
166
        }
×
167

168
        mkErr := func(msg string, err error, attrs ...any) error {
×
169
                ltndLog.ErrorS(ctx, "Shutting down due to error in main "+
×
170
                        "method", err, attrs...)
×
171

×
172
                var (
×
173
                        params = []any{err}
×
174
                        fmtStr = msg + ": %w"
×
175
                )
×
176
                for _, attr := range attrs {
×
177
                        fmtStr += " %s"
×
178

×
179
                        params = append(params, attr)
×
180
                }
×
181

182
                return fmt.Errorf(fmtStr, params...)
×
183
        }
184

185
        // Show version at startup.
186
        ltndLog.InfoS(ctx, "Version Info",
×
187
                slog.String("version", build.Version()),
×
188
                slog.String("commit", build.Commit),
×
189
                slog.Any("debuglevel", build.Deployment),
×
190
                slog.String("logging", cfg.DebugLevel))
×
191

×
192
        var network string
×
193
        switch {
×
194
        case cfg.Bitcoin.TestNet3:
×
195
                network = "testnet"
×
196

197
        case cfg.Bitcoin.TestNet4:
×
198
                network = "testnet4"
×
199

200
        case cfg.Bitcoin.MainNet:
×
201
                network = "mainnet"
×
202

203
        case cfg.Bitcoin.SimNet:
×
204
                network = "simnet"
×
205

206
        case cfg.Bitcoin.RegTest:
×
207
                network = "regtest"
×
208

209
        case cfg.Bitcoin.SigNet:
×
210
                network = "signet"
×
211
        }
212

213
        ltndLog.InfoS(ctx, "Network Info",
×
214
                "active_chain", strings.Title(BitcoinChainName),
×
215
                "network", network)
×
216

×
217
        // Enable http profiling server if requested.
×
218
        if cfg.Pprof.Profile != "" {
×
219
                // Create the http handler.
×
220
                pprofMux := http.NewServeMux()
×
221
                pprofMux.HandleFunc("/debug/pprof/", pprof.Index)
×
222
                pprofMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
×
223
                pprofMux.HandleFunc("/debug/pprof/profile", pprof.Profile)
×
224
                pprofMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
×
225
                pprofMux.HandleFunc("/debug/pprof/trace", pprof.Trace)
×
226

×
227
                if cfg.Pprof.BlockingProfile != 0 {
×
228
                        runtime.SetBlockProfileRate(cfg.Pprof.BlockingProfile)
×
229
                }
×
230
                if cfg.Pprof.MutexProfile != 0 {
×
231
                        runtime.SetMutexProfileFraction(cfg.Pprof.MutexProfile)
×
232
                }
×
233

234
                // Redirect all requests to the pprof handler, thus visiting
235
                // `127.0.0.1:6060` will be redirected to
236
                // `127.0.0.1:6060/debug/pprof`.
237
                pprofMux.Handle("/", http.RedirectHandler(
×
238
                        "/debug/pprof/", http.StatusSeeOther,
×
239
                ))
×
240

×
241
                ltndLog.InfoS(ctx, "Pprof listening", "addr", cfg.Pprof.Profile)
×
242

×
243
                // Create the pprof server.
×
244
                pprofServer := &http.Server{
×
245
                        Addr:              cfg.Pprof.Profile,
×
246
                        Handler:           pprofMux,
×
247
                        ReadHeaderTimeout: cfg.HTTPHeaderTimeout,
×
248
                }
×
249

×
250
                // Shut the server down when lnd is shutting down.
×
251
                defer func() {
×
252
                        ltndLog.InfoS(ctx, "Stopping pprof server...")
×
253
                        err := pprofServer.Shutdown(ctx)
×
254
                        if err != nil {
×
255
                                ltndLog.ErrorS(ctx, "Stop pprof server", err)
×
256
                        }
×
257
                }()
258

259
                // Start the pprof server.
260
                go func() {
×
261
                        err := pprofServer.ListenAndServe()
×
262
                        if err != nil && !errors.Is(err, http.ErrServerClosed) {
×
263
                                ltndLog.ErrorS(ctx, "Could not serve pprof "+
×
264
                                        "server", err)
×
265
                        }
×
266
                }()
267
        }
268

269
        // Write cpu profile if requested.
270
        if cfg.Pprof.CPUProfile != "" {
×
271
                f, err := os.Create(cfg.Pprof.CPUProfile)
×
272
                if err != nil {
×
273
                        return mkErr("unable to create CPU profile", err)
×
274
                }
×
275
                _ = runtimePprof.StartCPUProfile(f)
×
276
                defer func() {
×
277
                        _ = f.Close()
×
278
                }()
×
279
                defer runtimePprof.StopCPUProfile()
×
280
        }
281

282
        // Run configuration dependent DB pre-initialization. Note that this
283
        // needs to be done early and once during the startup process, before
284
        // any DB access.
285
        if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
×
286
                return mkErr("error initializing DBs", err)
×
287
        }
×
288

289
        tlsManagerCfg := &TLSManagerCfg{
×
290
                TLSCertPath:        cfg.TLSCertPath,
×
291
                TLSKeyPath:         cfg.TLSKeyPath,
×
292
                TLSEncryptKey:      cfg.TLSEncryptKey,
×
293
                TLSExtraIPs:        cfg.TLSExtraIPs,
×
294
                TLSExtraDomains:    cfg.TLSExtraDomains,
×
295
                TLSAutoRefresh:     cfg.TLSAutoRefresh,
×
296
                TLSDisableAutofill: cfg.TLSDisableAutofill,
×
297
                TLSCertDuration:    cfg.TLSCertDuration,
×
298

×
299
                LetsEncryptDir:    cfg.LetsEncryptDir,
×
300
                LetsEncryptDomain: cfg.LetsEncryptDomain,
×
301
                LetsEncryptListen: cfg.LetsEncryptListen,
×
302

×
303
                DisableRestTLS: cfg.DisableRestTLS,
×
304

×
305
                HTTPHeaderTimeout: cfg.HTTPHeaderTimeout,
×
306
        }
×
307
        tlsManager := NewTLSManager(tlsManagerCfg)
×
308
        serverOpts, restDialOpts, restListen, cleanUp,
×
309
                err := tlsManager.SetCertificateBeforeUnlock()
×
310
        if err != nil {
×
311
                return mkErr("error setting cert before unlock", err)
×
312
        }
×
313
        if cleanUp != nil {
×
314
                defer cleanUp()
×
315
        }
×
316

317
        // If we have chosen to start with a dedicated listener for the
318
        // rpc server, we set it directly.
319
        grpcListeners := append([]*ListenerWithSignal{}, lisCfg.RPCListeners...)
×
320
        if len(grpcListeners) == 0 {
×
321
                // Otherwise we create listeners from the RPCListeners defined
×
322
                // in the config.
×
323
                for _, grpcEndpoint := range cfg.RPCListeners {
×
324
                        // Start a gRPC server listening for HTTP/2
×
325
                        // connections.
×
326
                        lis, err := lncfg.ListenOnAddress(grpcEndpoint)
×
327
                        if err != nil {
×
328
                                return mkErr("unable to listen on grpc "+
×
329
                                        "endpoint", err,
×
330
                                        slog.String(
×
331
                                                "endpoint",
×
332
                                                grpcEndpoint.String(),
×
333
                                        ))
×
334
                        }
×
335
                        defer lis.Close()
×
336

×
337
                        grpcListeners = append(
×
338
                                grpcListeners, &ListenerWithSignal{
×
339
                                        Listener: lis,
×
340
                                        Ready:    make(chan struct{}),
×
341
                                },
×
342
                        )
×
343
                }
344
        }
345

346
        // Create a new RPC interceptor that we'll add to the GRPC server. This
347
        // will be used to log the API calls invoked on the GRPC server.
348
        interceptorChain := rpcperms.NewInterceptorChain(
×
349
                rpcsLog, cfg.NoMacaroons, cfg.RPCMiddleware.Mandatory,
×
350
        )
×
351
        if err := interceptorChain.Start(); err != nil {
×
352
                return mkErr("error starting interceptor chain", err)
×
353
        }
×
354
        defer func() {
×
355
                err := interceptorChain.Stop()
×
356
                if err != nil {
×
357
                        ltndLog.Warnf("error stopping RPC interceptor "+
×
358
                                "chain: %v", err)
×
359
                }
×
360
        }()
361

362
        // Allow the user to overwrite some defaults of the gRPC library related
363
        // to connection keepalive (server side and client side pings).
364
        serverKeepalive := keepalive.ServerParameters{
×
365
                Time:    cfg.GRPC.ServerPingTime,
×
366
                Timeout: cfg.GRPC.ServerPingTimeout,
×
367
        }
×
368
        clientKeepalive := keepalive.EnforcementPolicy{
×
369
                MinTime:             cfg.GRPC.ClientPingMinWait,
×
370
                PermitWithoutStream: cfg.GRPC.ClientAllowPingWithoutStream,
×
371
        }
×
372

×
373
        rpcServerOpts := interceptorChain.CreateServerOpts()
×
374
        serverOpts = append(serverOpts, rpcServerOpts...)
×
375
        serverOpts = append(
×
376
                serverOpts, grpc.MaxRecvMsgSize(lnrpc.MaxGrpcMsgSize),
×
377
                grpc.KeepaliveParams(serverKeepalive),
×
378
                grpc.KeepaliveEnforcementPolicy(clientKeepalive),
×
379
        )
×
380

×
381
        grpcServer := grpc.NewServer(serverOpts...)
×
382
        defer grpcServer.Stop()
×
383

×
384
        // We'll also register the RPC interceptor chain as the StateServer, as
×
385
        // it can be used to query for the current state of the wallet.
×
386
        lnrpc.RegisterStateServer(grpcServer, interceptorChain)
×
387

×
388
        // Initialize, and register our implementation of the gRPC interface
×
389
        // exported by the rpcServer.
×
390
        rpcServer := newRPCServer(cfg, interceptorChain, implCfg, interceptor)
×
391
        err = rpcServer.RegisterWithGrpcServer(grpcServer)
×
392
        if err != nil {
×
393
                return mkErr("error registering gRPC server", err)
×
394
        }
×
395

396
        // Now that both the WalletUnlocker and LightningService have been
397
        // registered with the GRPC server, we can start listening.
398
        err = startGrpcListen(cfg, grpcServer, grpcListeners)
×
399
        if err != nil {
×
400
                return mkErr("error starting gRPC listener", err)
×
401
        }
×
402

403
        // Now start the REST proxy for our gRPC server above. We'll ensure
404
        // we direct LND to connect to its loopback address rather than a
405
        // wildcard to prevent certificate issues when accessing the proxy
406
        // externally.
407
        stopProxy, err := startRestProxy(
×
408
                ctx, cfg, rpcServer, restDialOpts, restListen,
×
409
        )
×
410
        if err != nil {
×
411
                return mkErr("error starting REST proxy", err)
×
412
        }
×
413
        defer stopProxy()
×
414

×
415
        // Start leader election if we're running on etcd. Continuation will be
×
416
        // blocked until this instance is elected as the current leader or
×
417
        // shutting down.
×
418
        elected := false
×
419
        var leaderElector cluster.LeaderElector
×
420
        if cfg.Cluster.EnableLeaderElection {
×
421
                electionCtx, cancelElection := context.WithCancel(ctx)
×
422

×
423
                go func() {
×
424
                        <-interceptor.ShutdownChannel()
×
425
                        cancelElection()
×
426
                }()
×
427

428
                ltndLog.InfoS(ctx, "Using leader elector",
×
429
                        "elector", cfg.Cluster.LeaderElector)
×
430

×
431
                leaderElector, err = cfg.Cluster.MakeLeaderElector(
×
432
                        electionCtx, cfg.DB,
×
433
                )
×
434
                if err != nil {
×
435
                        return err
×
436
                }
×
437

438
                defer func() {
×
439
                        if !elected {
×
440
                                return
×
441
                        }
×
442

443
                        ltndLog.InfoS(ctx, "Attempting to resign from "+
×
444
                                "leader role", "cluster_id", cfg.Cluster.ID)
×
445

×
446
                        // Ensure that we don't block the shutdown process if
×
447
                        // the leader resigning process takes too long. The
×
448
                        // cluster will ensure that the leader is not elected
×
449
                        // again until the previous leader has resigned or the
×
450
                        // leader election timeout has passed.
×
451
                        timeoutCtx, cancel := context.WithTimeout(
×
452
                                ctx, leaderResignTimeout,
×
453
                        )
×
454
                        defer cancel()
×
455

×
456
                        if err := leaderElector.Resign(timeoutCtx); err != nil {
×
457
                                ltndLog.Errorf("Leader elector failed to "+
×
458
                                        "resign: %v", err)
×
459
                        }
×
460
                }()
461

462
                ltndLog.InfoS(ctx, "Starting leadership campaign",
×
463
                        "cluster_id", cfg.Cluster.ID)
×
464

×
465
                if err := leaderElector.Campaign(electionCtx); err != nil {
×
466
                        return mkErr("leadership campaign failed", err)
×
467
                }
×
468

469
                elected = true
×
470
                ltndLog.InfoS(ctx, "Elected as leader",
×
471
                        "cluster_id", cfg.Cluster.ID)
×
472
        }
473

474
        dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
×
475
        switch {
×
476
        case errors.Is(err, channeldb.ErrDryRunMigrationOK):
×
477
                ltndLog.InfoS(ctx, "Exiting due to BuildDatabase error",
×
478
                        slog.Any("err", err))
×
479
                return nil
×
480
        case err != nil:
×
481
                return mkErr("unable to open databases", err)
×
482
        }
483

484
        defer cleanUp()
×
485

×
486
        partialChainControl, walletConfig, cleanUp, err := implCfg.BuildWalletConfig(
×
487
                ctx, dbs, &implCfg.AuxComponents, interceptorChain,
×
488
                grpcListeners,
×
489
        )
×
490
        if err != nil {
×
491
                return mkErr("error creating wallet config", err)
×
492
        }
×
493

494
        defer cleanUp()
×
495

×
496
        activeChainControl, cleanUp, err := implCfg.BuildChainControl(
×
497
                partialChainControl, walletConfig,
×
498
        )
×
499
        if err != nil {
×
500
                return mkErr("error loading chain control", err)
×
501
        }
×
502

503
        defer cleanUp()
×
504

×
505
        // TODO(roasbeef): add rotation
×
506
        idKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
×
507
                keychain.KeyLocator{
×
508
                        Family: keychain.KeyFamilyNodeKey,
×
509
                        Index:  0,
×
510
                },
×
511
        )
×
512
        if err != nil {
×
513
                return mkErr("error deriving node key", err)
×
514
        }
×
515

516
        if cfg.Tor.StreamIsolation && cfg.Tor.SkipProxyForClearNetTargets {
×
517
                return errStreamIsolationWithProxySkip
×
518
        }
×
519

520
        if cfg.Tor.Active {
×
521
                if cfg.Tor.SkipProxyForClearNetTargets {
×
522
                        srvrLog.InfoS(ctx, "Onion services are accessible "+
×
523
                                "via Tor! NOTE: Traffic to clearnet services "+
×
524
                                "is not routed via Tor.")
×
525
                } else {
×
526
                        srvrLog.InfoS(ctx, "Proxying all network traffic "+
×
527
                                "via Tor! NOTE: Ensure the backend node is "+
×
528
                                "proxying over Tor as well",
×
529
                                "stream_isolation", cfg.Tor.StreamIsolation)
×
530
                }
×
531
        }
532

533
        // If tor is active and either v2 or v3 onion services have been
534
        // specified, make a tor controller and pass it into both the watchtower
535
        // server and the regular lnd server.
536
        var torController *tor.Controller
×
537
        if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) {
×
538
                torController = tor.NewController(
×
539
                        cfg.Tor.Control, cfg.Tor.TargetIPAddress,
×
540
                        cfg.Tor.Password,
×
541
                )
×
542

×
543
                // Start the tor controller before giving it to any other
×
544
                // subsystems.
×
545
                if err := torController.Start(); err != nil {
×
546
                        return mkErr("unable to initialize tor controller",
×
547
                                err)
×
548
                }
×
549
                defer func() {
×
550
                        if err := torController.Stop(); err != nil {
×
551
                                ltndLog.ErrorS(ctx, "Error stopping tor "+
×
552
                                        "controller", err)
×
553
                        }
×
554
                }()
555
        }
556

557
        var tower *watchtower.Standalone
×
558
        if cfg.Watchtower.Active {
×
559
                towerKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
×
560
                        keychain.KeyLocator{
×
561
                                Family: keychain.KeyFamilyTowerID,
×
562
                                Index:  0,
×
563
                        },
×
564
                )
×
565
                if err != nil {
×
566
                        return mkErr("error deriving tower key", err)
×
567
                }
×
568

569
                wtCfg := &watchtower.Config{
×
570
                        BlockFetcher:   activeChainControl.ChainIO,
×
571
                        DB:             dbs.TowerServerDB,
×
572
                        EpochRegistrar: activeChainControl.ChainNotifier,
×
573
                        Net:            cfg.net,
×
574
                        NewAddress: func() (btcutil.Address, error) {
×
575
                                return activeChainControl.Wallet.NewAddress(
×
576
                                        lnwallet.TaprootPubkey, false,
×
577
                                        lnwallet.DefaultAccountName,
×
578
                                )
×
579
                        },
×
580
                        NodeKeyECDH: keychain.NewPubKeyECDH(
581
                                towerKeyDesc, activeChainControl.KeyRing,
582
                        ),
583
                        PublishTx: activeChainControl.Wallet.PublishTransaction,
584
                        ChainHash: *cfg.ActiveNetParams.GenesisHash,
585
                }
586

587
                // If there is a tor controller (user wants auto hidden
588
                // services), then store a pointer in the watchtower config.
589
                if torController != nil {
×
590
                        wtCfg.TorController = torController
×
591
                        wtCfg.WatchtowerKeyPath = cfg.Tor.WatchtowerKeyPath
×
592
                        wtCfg.EncryptKey = cfg.Tor.EncryptKey
×
593
                        wtCfg.KeyRing = activeChainControl.KeyRing
×
594

×
595
                        switch {
×
596
                        case cfg.Tor.V2:
×
597
                                wtCfg.Type = tor.V2
×
598
                        case cfg.Tor.V3:
×
599
                                wtCfg.Type = tor.V3
×
600
                        }
601
                }
602

603
                wtConfig, err := cfg.Watchtower.Apply(
×
604
                        wtCfg, lncfg.NormalizeAddresses,
×
605
                )
×
606
                if err != nil {
×
607
                        return mkErr("unable to configure watchtower", err)
×
608
                }
×
609

610
                tower, err = watchtower.New(wtConfig)
×
611
                if err != nil {
×
612
                        return mkErr("unable to create watchtower", err)
×
613
                }
×
614
        }
615

616
        // Initialize the MultiplexAcceptor. If lnd was started with the
617
        // zero-conf feature bit, then this will be a ZeroConfAcceptor.
618
        // Otherwise, this will be a ChainedAcceptor.
619
        var multiAcceptor chanacceptor.MultiplexAcceptor
×
620
        if cfg.ProtocolOptions.ZeroConf() {
×
621
                multiAcceptor = chanacceptor.NewZeroConfAcceptor()
×
622
        } else {
×
623
                multiAcceptor = chanacceptor.NewChainedAcceptor()
×
624
        }
×
625

626
        // Set up the core server which will listen for incoming peer
627
        // connections.
628
        server, err := newServer(
×
629
                cfg, cfg.Listeners, dbs, activeChainControl, &idKeyDesc,
×
630
                activeChainControl.Cfg.WalletUnlockParams.ChansToRestore,
×
631
                multiAcceptor, torController, tlsManager, leaderElector,
×
632
                implCfg,
×
633
        )
×
634
        if err != nil {
×
635
                return mkErr("unable to create server", err)
×
636
        }
×
637

638
        // Set up an autopilot manager from the current config. This will be
639
        // used to manage the underlying autopilot agent, starting and stopping
640
        // it at will.
641
        atplCfg, err := initAutoPilot(
×
642
                server, cfg.Autopilot, activeChainControl.MinHtlcIn,
×
643
                cfg.ActiveNetParams,
×
644
        )
×
645
        if err != nil {
×
646
                return mkErr("unable to initialize autopilot", err)
×
647
        }
×
648

649
        atplManager, err := autopilot.NewManager(atplCfg)
×
650
        if err != nil {
×
651
                return mkErr("unable to create autopilot manager", err)
×
652
        }
×
653
        if err := atplManager.Start(); err != nil {
×
654
                return mkErr("unable to start autopilot manager", err)
×
655
        }
×
656
        defer atplManager.Stop()
×
657

×
658
        err = tlsManager.LoadPermanentCertificate(activeChainControl.KeyRing)
×
659
        if err != nil {
×
660
                return mkErr("unable to load permanent TLS certificate", err)
×
661
        }
×
662

663
        // Now we have created all dependencies necessary to populate and
664
        // start the RPC server.
665
        err = rpcServer.addDeps(
×
666
                server, interceptorChain.MacaroonService(), cfg.SubRPCServers,
×
667
                atplManager, server.invoices, tower, multiAcceptor,
×
668
                server.invoiceHtlcModifier,
×
669
        )
×
670
        if err != nil {
×
671
                return mkErr("unable to add deps to RPC server", err)
×
672
        }
×
673
        if err := rpcServer.Start(); err != nil {
×
674
                return mkErr("unable to start RPC server", err)
×
675
        }
×
676
        defer rpcServer.Stop()
×
677

×
678
        // We transition the RPC state to Active, as the RPC server is up.
×
679
        interceptorChain.SetRPCActive()
×
680

×
681
        if err := interceptor.Notifier.NotifyReady(true); err != nil {
×
682
                return mkErr("error notifying ready", err)
×
683
        }
×
684

685
        // We'll wait until we're fully synced to continue the start up of the
686
        // remainder of the daemon. This ensures that we don't accept any
687
        // possibly invalid state transitions, or accept channels with spent
688
        // funds.
689
        _, bestHeight, err := activeChainControl.ChainIO.GetBestBlock()
×
690
        if err != nil {
×
691
                return mkErr("unable to determine chain tip", err)
×
692
        }
×
693

694
        ltndLog.InfoS(ctx, "Waiting for chain backend to finish sync",
×
695
                slog.Int64("start_height", int64(bestHeight)))
×
696

×
697
        type syncResult struct {
×
698
                synced        bool
×
699
                bestBlockTime int64
×
700
                err           error
×
701
        }
×
702

×
703
        var syncedResChan = make(chan syncResult, 1)
×
704

×
705
        for {
×
706
                // We check if the wallet is synced in a separate goroutine as
×
707
                // the call is blocking, and we want to be able to interrupt it
×
708
                // if the daemon is shutting down.
×
709
                go func() {
×
710
                        synced, bestBlockTime, err := activeChainControl.Wallet.
×
711
                                IsSynced()
×
712
                        syncedResChan <- syncResult{synced, bestBlockTime, err}
×
713
                }()
×
714

715
                select {
×
716
                case <-interceptor.ShutdownChannel():
×
717
                        return nil
×
718

719
                case res := <-syncedResChan:
×
720
                        if res.err != nil {
×
721
                                return mkErr("unable to determine if wallet "+
×
722
                                        "is synced", res.err)
×
723
                        }
×
724

725
                        ltndLog.DebugS(ctx, "Syncing to block chain",
×
726
                                "best_block_time", time.Unix(res.bestBlockTime, 0),
×
727
                                "is_synced", res.synced)
×
728

×
729
                        if res.synced {
×
730
                                break
×
731
                        }
732

733
                        // If we're not yet synced, we'll wait for a second
734
                        // before checking again.
735
                        select {
×
736
                        case <-interceptor.ShutdownChannel():
×
737
                                return nil
×
738

739
                        case <-time.After(time.Second):
×
740
                                continue
×
741
                        }
742
                }
743

744
                break
×
745
        }
746

747
        _, bestHeight, err = activeChainControl.ChainIO.GetBestBlock()
×
748
        if err != nil {
×
749
                return mkErr("unable to determine chain tip", err)
×
750
        }
×
751

752
        ltndLog.InfoS(ctx, "Chain backend is fully synced!",
×
753
                "end_height", bestHeight)
×
754

×
755
        // With all the relevant chains initialized, we can finally start the
×
756
        // server itself. We start the server in an asynchronous goroutine so
×
757
        // that we are able to interrupt and shutdown the daemon gracefully in
×
758
        // case the startup of the subservers do not behave as expected.
×
759
        errChan := make(chan error)
×
760
        go func() {
×
761
                errChan <- server.Start()
×
762
        }()
×
763

764
        defer func() {
×
765
                err := server.Stop()
×
766
                if err != nil {
×
767
                        ltndLog.WarnS(ctx, "Stopping the server including all "+
×
768
                                "its subsystems failed with", err)
×
769
                }
×
770
        }()
771

772
        select {
×
773
        case err := <-errChan:
×
774
                if err == nil {
×
775
                        break
×
776
                }
777

778
                return mkErr("unable to start server", err)
×
779

780
        case <-interceptor.ShutdownChannel():
×
781
                return nil
×
782
        }
783

784
        // We transition the server state to Active, as the server is up.
785
        interceptorChain.SetServerActive()
×
786

×
787
        // Now that the server has started, if the autopilot mode is currently
×
788
        // active, then we'll start the autopilot agent immediately. It will be
×
789
        // stopped together with the autopilot service.
×
790
        if cfg.Autopilot.Active {
×
791
                if err := atplManager.StartAgent(); err != nil {
×
792
                        return mkErr("unable to start autopilot agent", err)
×
793
                }
×
794
        }
795

796
        if cfg.Watchtower.Active {
×
797
                if err := tower.Start(); err != nil {
×
798
                        return mkErr("unable to start watchtower", err)
×
799
                }
×
800
                defer tower.Stop()
×
801
        }
802

803
        // Wait for shutdown signal from either a graceful server stop or from
804
        // the interrupt handler.
805
        <-interceptor.ShutdownChannel()
×
806
        return nil
×
807
}
808

809
// bakeMacaroon creates a new macaroon with newest version and the given
810
// permissions then returns it binary serialized.
811
func bakeMacaroon(ctx context.Context, svc *macaroons.Service,
812
        permissions []bakery.Op) ([]byte, error) {
×
813

×
814
        mac, err := svc.NewMacaroon(
×
815
                ctx, macaroons.DefaultRootKeyID, permissions...,
×
816
        )
×
817
        if err != nil {
×
818
                return nil, err
×
819
        }
×
820

821
        return mac.M().MarshalBinary()
×
822
}
823

824
// saveMacaroon bakes a macaroon with the specified macaroon permissions and
825
// writes it to a file with the given filename and file permissions.
826
func saveMacaroon(ctx context.Context, svc *macaroons.Service, filename string,
827
        macaroonPermissions []bakery.Op, filePermissions os.FileMode) error {
×
828

×
829
        macaroonBytes, err := bakeMacaroon(ctx, svc, macaroonPermissions)
×
830
        if err != nil {
×
831
                return err
×
832
        }
×
833
        err = os.WriteFile(filename, macaroonBytes, filePermissions)
×
834
        if err != nil {
×
835
                _ = os.Remove(filename)
×
836
                return err
×
837
        }
×
838

839
        return nil
×
840
}
841

842
// genDefaultMacaroons checks for three default macaroon files and generates
843
// them if they do not exist; one admin-level, one for invoice access and one
844
// read-only. Each macaroon is checked and created independently to ensure all
845
// three exist. The admin macaroon can also be used to generate more granular
846
// macaroons.
847
func genDefaultMacaroons(ctx context.Context, svc *macaroons.Service,
848
        admFile, roFile, invoiceFile string) error {
×
849

×
850
        // First, we'll generate a macaroon that only allows the caller to
×
851
        // access invoice related calls. This is useful for merchants and other
×
852
        // services to allow an isolated instance that can only query and
×
853
        // modify invoices.
×
854
        if !lnrpc.FileExists(invoiceFile) {
×
855
                err := saveMacaroon(
×
856
                        ctx, svc, invoiceFile, invoicePermissions, 0644,
×
857
                )
×
858
                if err != nil {
×
859
                        return err
×
860
                }
×
861
        }
862

863
        // Generate the read-only macaroon and write it to a file.
864
        if !lnrpc.FileExists(roFile) {
×
865
                err := saveMacaroon(
×
866
                        ctx, svc, roFile, readPermissions, 0644,
×
867
                )
×
868
                if err != nil {
×
869
                        return err
×
870
                }
×
871
        }
872

873
        // Generate the admin macaroon and write it to a file.
874
        if !lnrpc.FileExists(admFile) {
×
875
                err := saveMacaroon(
×
876
                        ctx, svc, admFile, adminPermissions(),
×
877
                        adminMacaroonFilePermissions,
×
878
                )
×
879
                if err != nil {
×
880
                        return err
×
881
                }
×
882
        }
883

884
        return nil
×
885
}
886

887
// adminPermissions returns a list of all permissions in a safe way that doesn't
888
// modify any of the source lists.
889
func adminPermissions() []bakery.Op {
×
890
        admin := make([]bakery.Op, len(readPermissions)+len(writePermissions))
×
891
        copy(admin[:len(readPermissions)], readPermissions)
×
892
        copy(admin[len(readPermissions):], writePermissions)
×
893
        return admin
×
894
}
×
895

896
// createWalletUnlockerService creates a WalletUnlockerService from the passed
897
// config.
898
func createWalletUnlockerService(cfg *Config) *walletunlocker.UnlockerService {
×
899
        // The macaroonFiles are passed to the wallet unlocker so they can be
×
900
        // deleted and recreated in case the root macaroon key is also changed
×
901
        // during the change password operation.
×
902
        macaroonFiles := []string{
×
903
                cfg.AdminMacPath, cfg.ReadMacPath, cfg.InvoiceMacPath,
×
904
        }
×
905

×
906
        return walletunlocker.New(
×
907
                cfg.ActiveNetParams.Params, macaroonFiles,
×
908
                cfg.ResetWalletTransactions, nil,
×
909
        )
×
910
}
×
911

912
// startGrpcListen starts the GRPC server on the passed listeners.
913
func startGrpcListen(cfg *Config, grpcServer *grpc.Server,
914
        listeners []*ListenerWithSignal) error {
×
915

×
916
        // Use a WaitGroup so we can be sure the instructions on how to input the
×
917
        // password is the last thing to be printed to the console.
×
918
        var wg sync.WaitGroup
×
919

×
920
        for _, lis := range listeners {
×
921
                wg.Add(1)
×
922
                go func(lis *ListenerWithSignal) {
×
923
                        rpcsLog.Infof("RPC server listening on %s", lis.Addr())
×
924

×
925
                        // Close the ready chan to indicate we are listening.
×
926
                        close(lis.Ready)
×
927

×
928
                        wg.Done()
×
929
                        _ = grpcServer.Serve(lis)
×
930
                }(lis)
×
931
        }
932

933
        // If Prometheus monitoring is enabled, start the Prometheus exporter.
934
        if cfg.Prometheus.Enabled() {
×
935
                err := monitoring.ExportPrometheusMetrics(
×
936
                        grpcServer, cfg.Prometheus,
×
937
                )
×
938
                if err != nil {
×
939
                        return err
×
940
                }
×
941
        }
942

943
        // Wait for gRPC servers to be up running.
944
        wg.Wait()
×
945

×
946
        return nil
×
947
}
948

949
// startRestProxy starts the given REST proxy on the listeners found in the
950
// config.
951
func startRestProxy(ctx context.Context, cfg *Config, rpcServer *rpcServer,
952
        restDialOpts []grpc.DialOption,
953
        restListen func(net.Addr) (net.Listener, error)) (func(), error) {
×
954

×
955
        // We use the first RPC listener as the destination for our REST proxy.
×
956
        // If the listener is set to listen on all interfaces, we replace it
×
957
        // with localhost, as we cannot dial it directly.
×
958
        restProxyDest := cfg.RPCListeners[0].String()
×
959
        switch {
×
960
        case strings.Contains(restProxyDest, "0.0.0.0"):
×
961
                restProxyDest = strings.Replace(
×
962
                        restProxyDest, "0.0.0.0", "127.0.0.1", 1,
×
963
                )
×
964

965
        case strings.Contains(restProxyDest, "[::]"):
×
966
                restProxyDest = strings.Replace(
×
967
                        restProxyDest, "[::]", "[::1]", 1,
×
968
                )
×
969
        }
970

971
        var shutdownFuncs []func()
×
972
        shutdown := func() {
×
973
                for _, shutdownFn := range shutdownFuncs {
×
974
                        shutdownFn()
×
975
                }
×
976
        }
977

978
        // Start a REST proxy for our gRPC server.
979
        ctx, cancel := context.WithCancel(ctx)
×
980
        shutdownFuncs = append(shutdownFuncs, cancel)
×
981

×
982
        // We'll set up a proxy that will forward REST calls to the GRPC
×
983
        // server.
×
984
        //
×
985
        // The default JSON marshaler of the REST proxy only sets OrigName to
×
986
        // true, which instructs it to use the same field names as specified in
×
987
        // the proto file and not switch to camel case. What we also want is
×
988
        // that the marshaler prints all values, even if they are falsey.
×
989
        customMarshalerOption := proxy.WithMarshalerOption(
×
990
                proxy.MIMEWildcard, &proxy.JSONPb{
×
991
                        MarshalOptions:   *lnrpc.RESTJsonMarshalOpts,
×
992
                        UnmarshalOptions: *lnrpc.RESTJsonUnmarshalOpts,
×
993
                },
×
994
        )
×
995
        mux := proxy.NewServeMux(
×
996
                customMarshalerOption,
×
997

×
998
                // Don't allow falling back to other HTTP methods, we want exact
×
999
                // matches only. The actual method to be used can be overwritten
×
1000
                // by setting X-HTTP-Method-Override so there should be no
×
1001
                // reason for not specifying the correct method in the first
×
1002
                // place.
×
1003
                proxy.WithDisablePathLengthFallback(),
×
1004
        )
×
1005

×
1006
        // Register our services with the REST proxy.
×
1007
        err := rpcServer.RegisterWithRestProxy(
×
1008
                ctx, mux, restDialOpts, restProxyDest,
×
1009
        )
×
1010
        if err != nil {
×
1011
                return nil, err
×
1012
        }
×
1013

1014
        // Wrap the default grpc-gateway handler with the WebSocket handler.
1015
        restHandler := lnrpc.NewWebSocketProxy(
×
1016
                mux, rpcsLog, cfg.WSPingInterval, cfg.WSPongWait,
×
1017
                lnrpc.LndClientStreamingURIs,
×
1018
        )
×
1019

×
1020
        // Use a WaitGroup so we can be sure the instructions on how to input the
×
1021
        // password is the last thing to be printed to the console.
×
1022
        var wg sync.WaitGroup
×
1023

×
1024
        // Now spin up a network listener for each requested port and start a
×
1025
        // goroutine that serves REST with the created mux there.
×
1026
        for _, restEndpoint := range cfg.RESTListeners {
×
1027
                lis, err := restListen(restEndpoint)
×
1028
                if err != nil {
×
1029
                        ltndLog.Errorf("gRPC proxy unable to listen on %s",
×
1030
                                restEndpoint)
×
1031
                        return nil, err
×
1032
                }
×
1033

1034
                shutdownFuncs = append(shutdownFuncs, func() {
×
1035
                        err := lis.Close()
×
1036
                        if err != nil {
×
1037
                                rpcsLog.Errorf("Error closing listener: %v",
×
1038
                                        err)
×
1039
                        }
×
1040
                })
1041

1042
                wg.Add(1)
×
1043
                go func() {
×
1044
                        rpcsLog.Infof("gRPC proxy started at %s", lis.Addr())
×
1045

×
1046
                        // Create our proxy chain now. A request will pass
×
1047
                        // through the following chain:
×
1048
                        // req ---> CORS handler --> WS proxy --->
×
1049
                        //   REST proxy --> gRPC endpoint
×
1050
                        corsHandler := allowCORS(restHandler, cfg.RestCORS)
×
1051

×
1052
                        wg.Done()
×
1053
                        err := http.Serve(lis, corsHandler)
×
1054
                        if err != nil && !lnrpc.IsClosedConnError(err) {
×
1055
                                rpcsLog.Error(err)
×
1056
                        }
×
1057
                }()
1058
        }
1059

1060
        // Wait for REST servers to be up running.
1061
        wg.Wait()
×
1062

×
1063
        return shutdown, nil
×
1064
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc