/lnd.go
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (C) 2015-2022 The Lightning Network Developers

package lnd

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net"
	"net/http"
	"net/http/pprof"
	"os"
	"runtime"
	runtimePprof "runtime/pprof"
	"strings"
	"sync"
	"time"

	"github.com/btcsuite/btcd/btcutil"
	proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/lightningnetwork/lnd/autopilot"
	"github.com/lightningnetwork/lnd/build"
	"github.com/lightningnetwork/lnd/chanacceptor"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/cluster"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/lncfg"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/macaroons"
	"github.com/lightningnetwork/lnd/monitoring"
	"github.com/lightningnetwork/lnd/rpcperms"
	"github.com/lightningnetwork/lnd/signal"
	"github.com/lightningnetwork/lnd/tor"
	"github.com/lightningnetwork/lnd/walletunlocker"
	"github.com/lightningnetwork/lnd/watchtower"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
	"gopkg.in/macaroon-bakery.v2/bakery"
	"gopkg.in/macaroon.v2"
)

const (
	// adminMacaroonFilePermissions is the file permission that is used
	// for creating the admin macaroon file (0640 = owner read/write,
	// group read, no access for others).
	//
	// Why 640 is safe:
	// Assuming a reasonably secure Linux system, it will have a separate
	// group for each user. E.g. a new user lnd gets assigned group lnd
	// which nothing else belongs to. A system that does not do this is
	// inherently broken already.
	//
	// Since there is no other user in the group, no other user can read
	// the admin macaroon unless the administrator explicitly allowed it.
	// Thus there's no harm in allowing group read.
	adminMacaroonFilePermissions = 0640

	// leaderResignTimeout is the timeout used when resigning from the
	// leader role. This is kept short so LND can shut down quickly in
	// case of a system failure or network partition making the cluster
	// unresponsive. The cluster itself should ensure that the leader is
	// not elected again until the previous leader has resigned or the
	// leader election timeout has passed.
	leaderResignTimeout = 5 * time.Second
)

// AdminAuthOptions returns a list of DialOptions that can be used to
// authenticate with the RPC server with admin capabilities. Set
// skipMacaroons=true to exclude macaroons from the auth options; this is
// needed, for instance, for the WalletUnlocker service, which must be usable
// before any macaroons are created.
//
// NOTE: This should only be called after the RPCListener has signaled it is
// ready.
func AdminAuthOptions(cfg *Config, skipMacaroons bool) ([]grpc.DialOption,
	error) {

	creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "")
	if err != nil {
		return nil, fmt.Errorf("unable to read TLS cert: %w", err)
	}

	// Create a dial options array.
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(creds),
	}

	// Get the admin macaroon if macaroons are active.
	if !skipMacaroons && !cfg.NoMacaroons {
		// Load the admin macaroon file.
		macBytes, err := os.ReadFile(cfg.AdminMacPath)
		if err != nil {
			return nil, fmt.Errorf("unable to read macaroon "+
				"path (check the network setting!): %v", err)
		}

		mac := &macaroon.Macaroon{}
		if err = mac.UnmarshalBinary(macBytes); err != nil {
			return nil, fmt.Errorf("unable to decode macaroon: %w",
				err)
		}

		// Now we append the macaroon credentials to the dial options.
		cred, err := macaroons.NewMacaroonCredential(mac)
		if err != nil {
			return nil, fmt.Errorf("error cloning mac: %w", err)
		}
		opts = append(opts, grpc.WithPerRPCCredentials(cred))
	}

	return opts, nil
}
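
// A minimal usage sketch (illustrative, not part of this file): a process
// embedding lnd can build admin dial options and connect to its own RPC
// server once the RPC listener has signaled readiness. grpc.Dial and
// lnrpc.NewLightningClient are the standard client-side calls; the cfg value
// is assumed to be the same validated *Config that was passed to Main.
//
//	opts, err := lnd.AdminAuthOptions(cfg, false)
//	if err != nil {
//		return err
//	}
//	conn, err := grpc.Dial(cfg.RPCListeners[0].String(), opts...)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//	client := lnrpc.NewLightningClient(conn)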

// ListenerWithSignal is a net.Listener that has an additional Ready channel
// that will be closed when a server starts listening.
type ListenerWithSignal struct {
	net.Listener

	// Ready will be closed by the server listening on Listener.
	Ready chan struct{}

	// MacChan is an optional way to pass the admin macaroon to the
	// program that started lnd. The channel should be buffered to avoid
	// lnd being blocked on sending to the channel.
	MacChan chan []byte
}
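
// As an illustrative sketch (not part of this file), an embedding process
// can hand lnd a custom listener and receive the admin macaroon once it has
// been baked; the buffered channel follows the advice in the MacChan comment
// above:
//
//	lis, err := net.Listen("tcp", "127.0.0.1:0")
//	if err != nil {
//		return err
//	}
//	rpcLis := &lnd.ListenerWithSignal{
//		Listener: lis,
//		Ready:    make(chan struct{}),
//		MacChan:  make(chan []byte, 1),
//	}
//	lisCfg := lnd.ListenerCfg{
//		RPCListeners: []*lnd.ListenerWithSignal{rpcLis},
//	}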

// ListenerCfg is a wrapper around custom listeners that can be passed to lnd
// when calling its main method.
type ListenerCfg struct {
	// RPCListeners can be set to the listeners to use for the RPC server.
	// If empty a regular network listener will be created.
	RPCListeners []*ListenerWithSignal
}

// errStreamIsolationWithProxySkip is returned when Tor stream isolation is
// enabled together with skipping the proxy for clearnet targets, since the
// two options are mutually exclusive.
var errStreamIsolationWithProxySkip = errors.New(
	"while stream isolation is enabled, the TOR proxy may not be skipped",
)

// Main is the true entry point for lnd. It accepts a fully populated and
// validated main configuration struct and an optional listener config
// struct. This function starts all main system components then blocks until
// a signal is received on the shutdownChan at which point everything is shut
// down again.
func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
	interceptor signal.Interceptor) error {

	defer func() {
		ltndLog.Info("Shutdown complete")
		err := cfg.LogRotator.Close()
		if err != nil {
			ltndLog.Errorf("Could not close log rotator: %v", err)
		}
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ctx, err := build.WithBuildInfo(ctx, cfg.LogConfig)
	if err != nil {
		return fmt.Errorf("unable to add build info to context: %w",
			err)
	}

	// mkErr logs the given error along with any additional attributes,
	// then wraps it so it can be returned from this function.
	mkErr := func(msg string, err error, attrs ...any) error {
		ltndLog.ErrorS(ctx, "Shutting down due to error in main "+
			"method", err, attrs...)

		var (
			params = []any{err}
			fmtStr = msg + ": %w"
		)
		for _, attr := range attrs {
			fmtStr += " %s"

			params = append(params, attr)
		}

		return fmt.Errorf(fmtStr, params...)
	}

	// Show version at startup.
	ltndLog.InfoS(ctx, "Version Info",
		slog.String("version", build.Version()),
		slog.String("commit", build.Commit),
		slog.Any("debuglevel", build.Deployment),
		slog.String("logging", cfg.DebugLevel))

	// Determine the name of the active Bitcoin network for logging.
	var network string
	switch {
	case cfg.Bitcoin.TestNet3:
		network = "testnet"

	case cfg.Bitcoin.MainNet:
		network = "mainnet"

	case cfg.Bitcoin.SimNet:
		network = "simnet"

	case cfg.Bitcoin.RegTest:
		network = "regtest"

	case cfg.Bitcoin.SigNet:
		network = "signet"
	}

	ltndLog.InfoS(ctx, "Network Info",
		"active_chain", strings.Title(BitcoinChainName),
		"network", network)

	// Enable http profiling server if requested.
	if cfg.Pprof.Profile != "" {
		// Create the http handler.
		pprofMux := http.NewServeMux()
		pprofMux.HandleFunc("/debug/pprof/", pprof.Index)
		pprofMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		pprofMux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		pprofMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		pprofMux.HandleFunc("/debug/pprof/trace", pprof.Trace)

		if cfg.Pprof.BlockingProfile != 0 {
			runtime.SetBlockProfileRate(cfg.Pprof.BlockingProfile)
		}
		if cfg.Pprof.MutexProfile != 0 {
			runtime.SetMutexProfileFraction(cfg.Pprof.MutexProfile)
		}

		// Redirect all requests to the pprof handler, so that
		// visiting `127.0.0.1:6060` will be redirected to
		// `127.0.0.1:6060/debug/pprof`.
		pprofMux.Handle("/", http.RedirectHandler(
			"/debug/pprof/", http.StatusSeeOther,
		))

		ltndLog.InfoS(ctx, "Pprof listening", "addr", cfg.Pprof.Profile)

		// Create the pprof server.
		pprofServer := &http.Server{
			Addr:              cfg.Pprof.Profile,
			Handler:           pprofMux,
			ReadHeaderTimeout: cfg.HTTPHeaderTimeout,
		}

		// Shut the server down when lnd is shutting down.
		defer func() {
			ltndLog.InfoS(ctx, "Stopping pprof server...")
			err := pprofServer.Shutdown(ctx)
			if err != nil {
				ltndLog.ErrorS(ctx, "Stop pprof server", err)
			}
		}()

		// Start the pprof server.
		go func() {
			err := pprofServer.ListenAndServe()
			if err != nil && !errors.Is(err, http.ErrServerClosed) {
				ltndLog.ErrorS(ctx, "Could not serve pprof "+
					"server", err)
			}
		}()
	}

	// Write cpu profile if requested.
	if cfg.Pprof.CPUProfile != "" {
		f, err := os.Create(cfg.Pprof.CPUProfile)
		if err != nil {
			return mkErr("unable to create CPU profile", err)
		}
		_ = runtimePprof.StartCPUProfile(f)
		defer func() {
			_ = f.Close()
		}()
		defer runtimePprof.StopCPUProfile()
	}

	// Run configuration dependent DB pre-initialization. Note that this
	// needs to be done early and once during the startup process, before
	// any DB access.
	if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
		return mkErr("error initializing DBs", err)
	}

	tlsManagerCfg := &TLSManagerCfg{
		TLSCertPath:        cfg.TLSCertPath,
		TLSKeyPath:         cfg.TLSKeyPath,
		TLSEncryptKey:      cfg.TLSEncryptKey,
		TLSExtraIPs:        cfg.TLSExtraIPs,
		TLSExtraDomains:    cfg.TLSExtraDomains,
		TLSAutoRefresh:     cfg.TLSAutoRefresh,
		TLSDisableAutofill: cfg.TLSDisableAutofill,
		TLSCertDuration:    cfg.TLSCertDuration,

		LetsEncryptDir:    cfg.LetsEncryptDir,
		LetsEncryptDomain: cfg.LetsEncryptDomain,
		LetsEncryptListen: cfg.LetsEncryptListen,

		DisableRestTLS: cfg.DisableRestTLS,

		HTTPHeaderTimeout: cfg.HTTPHeaderTimeout,
	}
	tlsManager := NewTLSManager(tlsManagerCfg)
	serverOpts, restDialOpts, restListen, cleanUp,
		err := tlsManager.SetCertificateBeforeUnlock()
	if err != nil {
		return mkErr("error setting cert before unlock", err)
	}
	if cleanUp != nil {
		defer cleanUp()
	}

	// If we have chosen to start with a dedicated listener for the
	// rpc server, we set it directly.
	grpcListeners := append([]*ListenerWithSignal{}, lisCfg.RPCListeners...)
	if len(grpcListeners) == 0 {
		// Otherwise we create listeners from the RPCListeners defined
		// in the config.
		for _, grpcEndpoint := range cfg.RPCListeners {
			// Start a gRPC server listening for HTTP/2
			// connections.
			lis, err := lncfg.ListenOnAddress(grpcEndpoint)
			if err != nil {
				return mkErr("unable to listen on grpc "+
					"endpoint", err,
					slog.String(
						"endpoint",
						grpcEndpoint.String(),
					))
			}
			defer lis.Close()

			grpcListeners = append(
				grpcListeners, &ListenerWithSignal{
					Listener: lis,
					Ready:    make(chan struct{}),
				},
			)
		}
	}

	// Create a new RPC interceptor that we'll add to the GRPC server.
	// This will be used to log the API calls invoked on the GRPC server.
	interceptorChain := rpcperms.NewInterceptorChain(
		rpcsLog, cfg.NoMacaroons, cfg.RPCMiddleware.Mandatory,
	)
	if err := interceptorChain.Start(); err != nil {
		return mkErr("error starting interceptor chain", err)
	}
	defer func() {
		err := interceptorChain.Stop()
		if err != nil {
			ltndLog.Warnf("error stopping RPC interceptor "+
				"chain: %v", err)
		}
	}()

	// Allow the user to overwrite some defaults of the gRPC library
	// related to connection keepalive (server side and client side
	// pings).
	serverKeepalive := keepalive.ServerParameters{
		Time:    cfg.GRPC.ServerPingTime,
		Timeout: cfg.GRPC.ServerPingTimeout,
	}
	clientKeepalive := keepalive.EnforcementPolicy{
		MinTime:             cfg.GRPC.ClientPingMinWait,
		PermitWithoutStream: cfg.GRPC.ClientAllowPingWithoutStream,
	}

	rpcServerOpts := interceptorChain.CreateServerOpts()
	serverOpts = append(serverOpts, rpcServerOpts...)
	serverOpts = append(
		serverOpts, grpc.MaxRecvMsgSize(lnrpc.MaxGrpcMsgSize),
		grpc.KeepaliveParams(serverKeepalive),
		grpc.KeepaliveEnforcementPolicy(clientKeepalive),
	)

	grpcServer := grpc.NewServer(serverOpts...)
	defer grpcServer.Stop()

	// We'll also register the RPC interceptor chain as the StateServer,
	// as it can be used to query for the current state of the wallet.
	lnrpc.RegisterStateServer(grpcServer, interceptorChain)

	// Initialize, and register our implementation of the gRPC interface
	// exported by the rpcServer.
	rpcServer := newRPCServer(cfg, interceptorChain, implCfg, interceptor)
	err = rpcServer.RegisterWithGrpcServer(grpcServer)
	if err != nil {
		return mkErr("error registering gRPC server", err)
	}

	// Now that both the WalletUnlocker and LightningService have been
	// registered with the GRPC server, we can start listening.
	err = startGrpcListen(cfg, grpcServer, grpcListeners)
	if err != nil {
		return mkErr("error starting gRPC listener", err)
	}

	// Now start the REST proxy for our gRPC server above. We'll ensure
	// we direct LND to connect to its loopback address rather than a
	// wildcard to prevent certificate issues when accessing the proxy
	// externally.
	stopProxy, err := startRestProxy(
		ctx, cfg, rpcServer, restDialOpts, restListen,
	)
	if err != nil {
		return mkErr("error starting REST proxy", err)
	}
	defer stopProxy()

	// Start leader election if we're running on etcd. Continuation will
	// be blocked until this instance is elected as the current leader or
	// is shutting down.
	elected := false
	var leaderElector cluster.LeaderElector
	if cfg.Cluster.EnableLeaderElection {
		electionCtx, cancelElection := context.WithCancel(ctx)

		go func() {
			<-interceptor.ShutdownChannel()
			cancelElection()
		}()

		ltndLog.InfoS(ctx, "Using leader elector",
			"elector", cfg.Cluster.LeaderElector)

		leaderElector, err = cfg.Cluster.MakeLeaderElector(
			electionCtx, cfg.DB,
		)
		if err != nil {
			return err
		}

		defer func() {
			if !elected {
				return
			}

			ltndLog.InfoS(ctx, "Attempting to resign from "+
				"leader role", "cluster_id", cfg.Cluster.ID)

			// Ensure that we don't block the shutdown process if
			// the leader resigning process takes too long. The
			// cluster will ensure that the leader is not elected
			// again until the previous leader has resigned or the
			// leader election timeout has passed.
			timeoutCtx, cancel := context.WithTimeout(
				ctx, leaderResignTimeout,
			)
			defer cancel()

			if err := leaderElector.Resign(timeoutCtx); err != nil {
				ltndLog.Errorf("Leader elector failed to "+
					"resign: %v", err)
			}
		}()

		ltndLog.InfoS(ctx, "Starting leadership campaign",
			"cluster_id", cfg.Cluster.ID)

		if err := leaderElector.Campaign(electionCtx); err != nil {
			return mkErr("leadership campaign failed", err)
		}

		elected = true
		ltndLog.InfoS(ctx, "Elected as leader",
			"cluster_id", cfg.Cluster.ID)
	}

	dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
	switch {
	case errors.Is(err, channeldb.ErrDryRunMigrationOK):
		ltndLog.InfoS(ctx, "Exiting due to BuildDatabase error",
			slog.Any("err", err))
		return nil
	case err != nil:
		return mkErr("unable to open databases", err)
	}

	defer cleanUp()

	partialChainControl, walletConfig, cleanUp, err := implCfg.BuildWalletConfig(
		ctx, dbs, &implCfg.AuxComponents, interceptorChain,
		grpcListeners,
	)
	if err != nil {
		return mkErr("error creating wallet config", err)
	}

	defer cleanUp()

	activeChainControl, cleanUp, err := implCfg.BuildChainControl(
		partialChainControl, walletConfig,
	)
	if err != nil {
		return mkErr("error loading chain control", err)
	}

	defer cleanUp()

	// TODO(roasbeef): add rotation
	idKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
		keychain.KeyLocator{
			Family: keychain.KeyFamilyNodeKey,
			Index:  0,
		},
	)
	if err != nil {
		return mkErr("error deriving node key", err)
	}

	if cfg.Tor.StreamIsolation && cfg.Tor.SkipProxyForClearNetTargets {
		return errStreamIsolationWithProxySkip
	}

	if cfg.Tor.Active {
		if cfg.Tor.SkipProxyForClearNetTargets {
			srvrLog.InfoS(ctx, "Onion services are accessible "+
				"via Tor! NOTE: Traffic to clearnet services "+
				"is not routed via Tor.")
		} else {
			srvrLog.InfoS(ctx, "Proxying all network traffic "+
				"via Tor! NOTE: Ensure the backend node is "+
				"proxying over Tor as well",
				"stream_isolation", cfg.Tor.StreamIsolation)
		}
	}

	// If tor is active and either v2 or v3 onion services have been
	// specified, make a tor controller and pass it into both the
	// watchtower server and the regular lnd server.
	var torController *tor.Controller
	if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) {
		torController = tor.NewController(
			cfg.Tor.Control, cfg.Tor.TargetIPAddress,
			cfg.Tor.Password,
		)

		// Start the tor controller before giving it to any other
		// subsystems.
		if err := torController.Start(); err != nil {
			return mkErr("unable to initialize tor controller",
				err)
		}
		defer func() {
			if err := torController.Stop(); err != nil {
				ltndLog.ErrorS(ctx, "Error stopping tor "+
					"controller", err)
			}
		}()
	}

	var tower *watchtower.Standalone
	if cfg.Watchtower.Active {
		towerKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
			keychain.KeyLocator{
				Family: keychain.KeyFamilyTowerID,
				Index:  0,
			},
		)
		if err != nil {
			return mkErr("error deriving tower key", err)
		}

		wtCfg := &watchtower.Config{
			BlockFetcher:   activeChainControl.ChainIO,
			DB:             dbs.TowerServerDB,
			EpochRegistrar: activeChainControl.ChainNotifier,
			Net:            cfg.net,
			NewAddress: func() (btcutil.Address, error) {
				return activeChainControl.Wallet.NewAddress(
					lnwallet.TaprootPubkey, false,
					lnwallet.DefaultAccountName,
				)
			},
			NodeKeyECDH: keychain.NewPubKeyECDH(
				towerKeyDesc, activeChainControl.KeyRing,
			),
			PublishTx: activeChainControl.Wallet.PublishTransaction,
			ChainHash: *cfg.ActiveNetParams.GenesisHash,
		}

		// If there is a tor controller (user wants auto hidden
		// services), then store a pointer in the watchtower config.
		if torController != nil {
			wtCfg.TorController = torController
			wtCfg.WatchtowerKeyPath = cfg.Tor.WatchtowerKeyPath
			wtCfg.EncryptKey = cfg.Tor.EncryptKey
			wtCfg.KeyRing = activeChainControl.KeyRing

			switch {
			case cfg.Tor.V2:
				wtCfg.Type = tor.V2
			case cfg.Tor.V3:
				wtCfg.Type = tor.V3
			}
		}

		wtConfig, err := cfg.Watchtower.Apply(
			wtCfg, lncfg.NormalizeAddresses,
		)
		if err != nil {
			return mkErr("unable to configure watchtower", err)
		}

		tower, err = watchtower.New(wtConfig)
		if err != nil {
			return mkErr("unable to create watchtower", err)
		}
	}

	// Initialize the MultiplexAcceptor. If lnd was started with the
	// zero-conf feature bit, then this will be a ZeroConfAcceptor.
	// Otherwise, this will be a ChainedAcceptor.
	var multiAcceptor chanacceptor.MultiplexAcceptor
	if cfg.ProtocolOptions.ZeroConf() {
		multiAcceptor = chanacceptor.NewZeroConfAcceptor()
	} else {
		multiAcceptor = chanacceptor.NewChainedAcceptor()
	}

	// Set up the core server which will listen for incoming peer
	// connections.
	server, err := newServer(
		cfg, cfg.Listeners, dbs, activeChainControl, &idKeyDesc,
		activeChainControl.Cfg.WalletUnlockParams.ChansToRestore,
		multiAcceptor, torController, tlsManager, leaderElector,
		implCfg,
	)
	if err != nil {
		return mkErr("unable to create server", err)
	}

	// Set up an autopilot manager from the current config. This will be
	// used to manage the underlying autopilot agent, starting and
	// stopping it at will.
	atplCfg, err := initAutoPilot(
		server, cfg.Autopilot, activeChainControl.MinHtlcIn,
		cfg.ActiveNetParams,
	)
	if err != nil {
		return mkErr("unable to initialize autopilot", err)
	}

	atplManager, err := autopilot.NewManager(atplCfg)
	if err != nil {
		return mkErr("unable to create autopilot manager", err)
	}
	if err := atplManager.Start(); err != nil {
		return mkErr("unable to start autopilot manager", err)
	}
	defer atplManager.Stop()

	err = tlsManager.LoadPermanentCertificate(activeChainControl.KeyRing)
	if err != nil {
		return mkErr("unable to load permanent TLS certificate", err)
	}

	// Now we have created all dependencies necessary to populate and
	// start the RPC server.
	err = rpcServer.addDeps(
		server, interceptorChain.MacaroonService(), cfg.SubRPCServers,
		atplManager, server.invoices, tower, multiAcceptor,
		server.invoiceHtlcModifier,
	)
	if err != nil {
		return mkErr("unable to add deps to RPC server", err)
	}
	if err := rpcServer.Start(); err != nil {
		return mkErr("unable to start RPC server", err)
	}
	defer rpcServer.Stop()

	// We transition the RPC state to Active, as the RPC server is up.
	interceptorChain.SetRPCActive()

	if err := interceptor.Notifier.NotifyReady(true); err != nil {
		return mkErr("error notifying ready", err)
	}

	// We'll wait until we're fully synced to continue the startup of the
	// remainder of the daemon. This ensures that we don't accept any
	// possibly invalid state transitions, or accept channels with spent
	// funds.
	_, bestHeight, err := activeChainControl.ChainIO.GetBestBlock()
	if err != nil {
		return mkErr("unable to determine chain tip", err)
	}

	ltndLog.InfoS(ctx, "Waiting for chain backend to finish sync",
		slog.Int64("start_height", int64(bestHeight)))

	// syncResult bundles the outcome of a single wallet sync check.
	type syncResult struct {
		synced        bool
		bestBlockTime int64
		err           error
	}

	var syncedResChan = make(chan syncResult, 1)

	for {
		// We check if the wallet is synced in a separate goroutine as
		// the call is blocking, and we want to be able to interrupt
		// it if the daemon is shutting down.
		go func() {
			synced, bestBlockTime, err := activeChainControl.Wallet.
				IsSynced()
			syncedResChan <- syncResult{synced, bestBlockTime, err}
		}()

		select {
		case <-interceptor.ShutdownChannel():
			return nil

		case res := <-syncedResChan:
			if res.err != nil {
				return mkErr("unable to determine if wallet "+
					"is synced", res.err)
			}

			ltndLog.DebugS(ctx, "Syncing to block chain",
				"best_block_time", time.Unix(res.bestBlockTime, 0),
				"is_synced", res.synced)

			if res.synced {
				break
			}

			// If we're not yet synced, we'll wait for a second
			// before checking again.
			select {
			case <-interceptor.ShutdownChannel():
				return nil

			case <-time.After(time.Second):
				continue
			}
		}

		break
	}

	_, bestHeight, err = activeChainControl.ChainIO.GetBestBlock()
	if err != nil {
		return mkErr("unable to determine chain tip", err)
	}

	ltndLog.InfoS(ctx, "Chain backend is fully synced!",
		"end_height", bestHeight)

	// With all the relevant chains initialized, we can finally start the
	// server itself. We start the server in an asynchronous goroutine so
	// that we are able to interrupt and shut down the daemon gracefully
	// in case the startup of the subservers does not behave as expected.
	errChan := make(chan error)
	go func() {
		errChan <- server.Start()
	}()

	defer func() {
		err := server.Stop()
		if err != nil {
			ltndLog.WarnS(ctx, "Stopping the server including all "+
				"its subsystems failed with", err)
		}
	}()

	select {
	case err := <-errChan:
		if err == nil {
			break
		}

		return mkErr("unable to start server", err)

	case <-interceptor.ShutdownChannel():
		return nil
	}

	// We transition the server state to Active, as the server is up.
	interceptorChain.SetServerActive()

	// Now that the server has started, if the autopilot mode is currently
	// active, then we'll start the autopilot agent immediately. It will
	// be stopped together with the autopilot service.
	if cfg.Autopilot.Active {
		if err := atplManager.StartAgent(); err != nil {
			return mkErr("unable to start autopilot agent", err)
		}
	}

	if cfg.Watchtower.Active {
		if err := tower.Start(); err != nil {
			return mkErr("unable to start watchtower", err)
		}
		defer tower.Stop()
	}

	// Wait for shutdown signal from either a graceful server stop or from
	// the interrupt handler.
	<-interceptor.ShutdownChannel()
	return nil
}
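
// A minimal sketch of a custom entry point driving Main (illustrative; the
// LoadConfig and ImplementationConfig helpers mirror how lnd's own command
// wires this up, but treat their exact signatures as assumptions):
//
//	shutdownInterceptor, err := signal.Intercept()
//	if err != nil {
//		os.Exit(1)
//	}
//	cfg, err := lnd.LoadConfig(shutdownInterceptor)
//	if err != nil {
//		os.Exit(1)
//	}
//	implCfg := cfg.ImplementationConfig(shutdownInterceptor)
//	if err := lnd.Main(
//		cfg, lnd.ListenerCfg{}, implCfg, shutdownInterceptor,
//	); err != nil {
//		os.Exit(1)
//	}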

// bakeMacaroon creates a new macaroon with the newest version and the given
// permissions, then returns it binary serialized.
func bakeMacaroon(ctx context.Context, svc *macaroons.Service,
	permissions []bakery.Op) ([]byte, error) {

	mac, err := svc.NewMacaroon(
		ctx, macaroons.DefaultRootKeyID, permissions...,
	)
	if err != nil {
		return nil, err
	}

	return mac.M().MarshalBinary()
}
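
// For example (an illustrative sketch, assuming an initialized macaroons
// service svc), a caller could bake a macaroon limited to reading invoices:
//
//	macBytes, err := bakeMacaroon(ctx, svc, []bakery.Op{{
//		Entity: "invoices",
//		Action: "read",
//	}})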

// saveMacaroon bakes a macaroon with the specified macaroon permissions and
// writes it to a file with the given filename and file permissions. If the
// write fails, the file is removed so no partial macaroon is left behind.
func saveMacaroon(ctx context.Context, svc *macaroons.Service, filename string,
	macaroonPermissions []bakery.Op, filePermissions os.FileMode) error {

	macaroonBytes, err := bakeMacaroon(ctx, svc, macaroonPermissions)
	if err != nil {
		return err
	}
	err = os.WriteFile(filename, macaroonBytes, filePermissions)
	if err != nil {
		_ = os.Remove(filename)
		return err
	}

	return nil
}

// genDefaultMacaroons checks for three default macaroon files and generates
// them if they do not exist; one admin-level, one for invoice access and one
// read-only. Each macaroon is checked and created independently to ensure
// all three exist. The admin macaroon can also be used to generate more
// granular macaroons.
func genDefaultMacaroons(ctx context.Context, svc *macaroons.Service,
	admFile, roFile, invoiceFile string) error {

	// First, we'll generate a macaroon that only allows the caller to
	// access invoice related calls. This is useful for merchants and
	// other services to allow an isolated instance that can only query
	// and modify invoices.
	if !lnrpc.FileExists(invoiceFile) {
		err := saveMacaroon(
			ctx, svc, invoiceFile, invoicePermissions, 0644,
		)
		if err != nil {
			return err
		}
	}

	// Generate the read-only macaroon and write it to a file.
	if !lnrpc.FileExists(roFile) {
		err := saveMacaroon(
			ctx, svc, roFile, readPermissions, 0644,
		)
		if err != nil {
			return err
		}
	}

	// Generate the admin macaroon and write it to a file.
	if !lnrpc.FileExists(admFile) {
		err := saveMacaroon(
			ctx, svc, admFile, adminPermissions(),
			adminMacaroonFilePermissions,
		)
		if err != nil {
			return err
		}
	}

	return nil
}

// adminPermissions returns a list of all permissions in a safe way that
// doesn't modify any of the source lists.
func adminPermissions() []bakery.Op {
	admin := make([]bakery.Op, len(readPermissions)+len(writePermissions))
	copy(admin[:len(readPermissions)], readPermissions)
	copy(admin[len(readPermissions):], writePermissions)
	return admin
}

// createWalletUnlockerService creates a WalletUnlockerService from the
// passed config.
func createWalletUnlockerService(cfg *Config) *walletunlocker.UnlockerService {
	// The macaroonFiles are passed to the wallet unlocker so they can be
	// deleted and recreated in case the root macaroon key is also changed
	// during the change password operation.
	macaroonFiles := []string{
		cfg.AdminMacPath, cfg.ReadMacPath, cfg.InvoiceMacPath,
	}

	return walletunlocker.New(
		cfg.ActiveNetParams.Params, macaroonFiles,
		cfg.ResetWalletTransactions, nil,
	)
}

// startGrpcListen starts the GRPC server on the passed listeners.
func startGrpcListen(cfg *Config, grpcServer *grpc.Server,
	listeners []*ListenerWithSignal) error {

	// Use a WaitGroup so we can be sure the instructions on how to input
	// the password are the last thing to be printed to the console.
	var wg sync.WaitGroup

	for _, lis := range listeners {
		wg.Add(1)
		go func(lis *ListenerWithSignal) {
			rpcsLog.Infof("RPC server listening on %s", lis.Addr())

			// Close the ready chan to indicate we are listening.
			close(lis.Ready)

			wg.Done()
			_ = grpcServer.Serve(lis)
		}(lis)
	}

	// If Prometheus monitoring is enabled, start the Prometheus exporter.
	if cfg.Prometheus.Enabled() {
		err := monitoring.ExportPrometheusMetrics(
			grpcServer, cfg.Prometheus,
		)
		if err != nil {
			return err
		}
	}

	// Wait for gRPC servers to be up and running.
	wg.Wait()

	return nil
}

// startRestProxy starts the given REST proxy on the listeners found in the
// config.
func startRestProxy(ctx context.Context, cfg *Config, rpcServer *rpcServer,
	restDialOpts []grpc.DialOption,
	restListen func(net.Addr) (net.Listener, error)) (func(), error) {

	// We use the first RPC listener as the destination for our REST
	// proxy. If the listener is set to listen on all interfaces, we
	// replace it with localhost, as we cannot dial it directly.
	restProxyDest := cfg.RPCListeners[0].String()
	switch {
	case strings.Contains(restProxyDest, "0.0.0.0"):
		restProxyDest = strings.Replace(
			restProxyDest, "0.0.0.0", "127.0.0.1", 1,
		)

	case strings.Contains(restProxyDest, "[::]"):
		restProxyDest = strings.Replace(
			restProxyDest, "[::]", "[::1]", 1,
		)
	}

	var shutdownFuncs []func()
	shutdown := func() {
		for _, shutdownFn := range shutdownFuncs {
			shutdownFn()
		}
	}

	// Start a REST proxy for our gRPC server.
	ctx, cancel := context.WithCancel(ctx)
	shutdownFuncs = append(shutdownFuncs, cancel)

	// We'll set up a proxy that will forward REST calls to the GRPC
	// server.
	//
	// The default JSON marshaler of the REST proxy only sets OrigName to
	// true, which instructs it to use the same field names as specified
	// in the proto file and not switch to camel case. What we also want
	// is that the marshaler prints all values, even if they are falsey.
	customMarshalerOption := proxy.WithMarshalerOption(
		proxy.MIMEWildcard, &proxy.JSONPb{
			MarshalOptions:   *lnrpc.RESTJsonMarshalOpts,
			UnmarshalOptions: *lnrpc.RESTJsonUnmarshalOpts,
		},
	)
	mux := proxy.NewServeMux(
		customMarshalerOption,

		// Don't allow falling back to other HTTP methods, we want
		// exact matches only. The actual method to be used can be
		// overwritten by setting X-HTTP-Method-Override so there
		// should be no reason for not specifying the correct method
		// in the first place.
		proxy.WithDisablePathLengthFallback(),
	)

	// Register our services with the REST proxy.
	err := rpcServer.RegisterWithRestProxy(
		ctx, mux, restDialOpts, restProxyDest,
	)
	if err != nil {
		return nil, err
	}

	// Wrap the default grpc-gateway handler with the WebSocket handler.
	restHandler := lnrpc.NewWebSocketProxy(
		mux, rpcsLog, cfg.WSPingInterval, cfg.WSPongWait,
		lnrpc.LndClientStreamingURIs,
	)

	// Use a WaitGroup so we can be sure the instructions on how to input
	// the password are the last thing to be printed to the console.
	var wg sync.WaitGroup

	// Now spin up a network listener for each requested port and start a
	// goroutine that serves REST with the created mux there.
	for _, restEndpoint := range cfg.RESTListeners {
		lis, err := restListen(restEndpoint)
		if err != nil {
			ltndLog.Errorf("gRPC proxy unable to listen on %s",
				restEndpoint)
			return nil, err
		}

		shutdownFuncs = append(shutdownFuncs, func() {
			err := lis.Close()
			if err != nil {
				rpcsLog.Errorf("Error closing listener: %v",
					err)
			}
		})

		wg.Add(1)
		go func() {
			rpcsLog.Infof("gRPC proxy started at %s", lis.Addr())

			// Create our proxy chain now. A request will pass
			// through the following chain:
			// req ---> CORS handler --> WS proxy --->
			//   REST proxy --> gRPC endpoint
			corsHandler := allowCORS(restHandler, cfg.RestCORS)

			wg.Done()
			err := http.Serve(lis, corsHandler)
			if err != nil && !lnrpc.IsClosedConnError(err) {
				rpcsLog.Error(err)
			}
		}()
	}

	// Wait for REST servers to be up and running.
	wg.Wait()

	return shutdown, nil
}
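
// With the proxy running, every gRPC method is reachable over REST. An
// illustrative sketch (the default REST port 8080, the /v1/getinfo path and
// the Grpc-Metadata-macaroon header follow lnd's documented conventions,
// stated here as assumptions rather than guarantees):
//
//	curl -k https://localhost:8080/v1/getinfo \
//	    --header "Grpc-Metadata-macaroon: $(xxd -ps -u -c 1000 admin.macaroon)"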