lightningnetwork / lnd — build 11381029069 (push via GitHub, committer web-flow)
17 Oct 2024 08:16AM UTC — coverage: 58.884% (+0.1%) from 58.78%

Merge pull request #9195 from yyforyongyu/optimize-itest-setup
itest+lntest: speed up test setup

0 of 60 new or added lines in 4 files covered (0.0%).
77 existing lines in 12 files now uncovered.
131176 of 222771 relevant lines covered (58.88%).
28232.18 hits per line.

Source File: /lnd.go — 63.89% of lines covered
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (C) 2015-2022 The Lightning Network Developers

package lnd

import (
        "context"
        "errors"
        "fmt"
        "net"
        "net/http"
        "net/http/pprof"
        "os"
        "runtime"
        runtimePprof "runtime/pprof"
        "strings"
        "sync"
        "time"

        "github.com/btcsuite/btcd/btcutil"
        proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
        "github.com/lightningnetwork/lnd/autopilot"
        "github.com/lightningnetwork/lnd/build"
        "github.com/lightningnetwork/lnd/chanacceptor"
        "github.com/lightningnetwork/lnd/channeldb"
        "github.com/lightningnetwork/lnd/cluster"
        "github.com/lightningnetwork/lnd/keychain"
        "github.com/lightningnetwork/lnd/lncfg"
        "github.com/lightningnetwork/lnd/lnrpc"
        "github.com/lightningnetwork/lnd/lnwallet"
        "github.com/lightningnetwork/lnd/macaroons"
        "github.com/lightningnetwork/lnd/monitoring"
        "github.com/lightningnetwork/lnd/rpcperms"
        "github.com/lightningnetwork/lnd/signal"
        "github.com/lightningnetwork/lnd/tor"
        "github.com/lightningnetwork/lnd/walletunlocker"
        "github.com/lightningnetwork/lnd/watchtower"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/keepalive"
        "gopkg.in/macaroon-bakery.v2/bakery"
        "gopkg.in/macaroon.v2"
)

const (
        // adminMacaroonFilePermissions is the file permission that is used for
        // creating the admin macaroon file.
        //
        // Why 640 is safe:
        // Assuming a reasonably secure Linux system, it will have a
        // separate group for each user. E.g. a new user lnd gets assigned group
        // lnd which nothing else belongs to. A system that does not do this is
        // inherently broken already.
        //
        // Since there is no other user in the group, no other user can read
        // admin macaroon unless the administrator explicitly allowed it. Thus
        // there's no harm allowing group read.
        adminMacaroonFilePermissions = 0640

        // leaderResignTimeout is the timeout used when resigning from the
        // leader role. This is kept short so LND can shut down quickly in case
        // of a system failure or network partition making the cluster
        // unresponsive. The cluster itself should ensure that the leader is not
        // elected again until the previous leader has resigned or the leader
        // election timeout has passed.
        leaderResignTimeout = 5 * time.Second
)

// AdminAuthOptions returns a list of DialOptions that can be used to
// authenticate with the RPC server with admin capabilities.
// skipMacaroons=true should be set if we don't want to include macaroons with
// the auth options. This is needed for instance for the WalletUnlocker
// service, which must be usable also before macaroons are created.
//
// NOTE: This should only be called after the RPCListener has signaled it is
// ready.
func AdminAuthOptions(cfg *Config, skipMacaroons bool) ([]grpc.DialOption,
        error) {

        creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "")
        if err != nil {
                return nil, fmt.Errorf("unable to read TLS cert: %w", err)
        }

        // Create a dial options array.
        opts := []grpc.DialOption{
                grpc.WithTransportCredentials(creds),
        }

        // Get the admin macaroon if macaroons are active.
        if !skipMacaroons && !cfg.NoMacaroons {
                // Load the admin macaroon file.
                macBytes, err := os.ReadFile(cfg.AdminMacPath)
                if err != nil {
                        return nil, fmt.Errorf("unable to read macaroon "+
                                "path (check the network setting!): %v", err)
                }

                mac := &macaroon.Macaroon{}
                if err = mac.UnmarshalBinary(macBytes); err != nil {
                        return nil, fmt.Errorf("unable to decode macaroon: %w",
                                err)
                }

                // Now we append the macaroon credentials to the dial options.
                cred, err := macaroons.NewMacaroonCredential(mac)
                if err != nil {
                        return nil, fmt.Errorf("error cloning mac: %w", err)
                }
                opts = append(opts, grpc.WithPerRPCCredentials(cred))
        }

        return opts, nil
}
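
// NOTE (editorial illustration, not part of the upstream file): a minimal
// sketch of how a client could consume AdminAuthOptions to dial the admin
// gRPC endpoint. The address and the LightningClient calls below are
// assumptions made for illustration only.
//
//	opts, err := AdminAuthOptions(cfg, false)
//	if err != nil {
//		return err
//	}
//	conn, err := grpc.Dial("localhost:10009", opts...)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//
//	client := lnrpc.NewLightningClient(conn)
//	info, err := client.GetInfo(ctx, &lnrpc.GetInfoRequest{})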

// ListenerWithSignal is a net.Listener that has an additional Ready channel
// that will be closed when a server starts listening.
type ListenerWithSignal struct {
        net.Listener

        // Ready will be closed by the server listening on Listener.
        Ready chan struct{}

        // MacChan is an optional way to pass the admin macaroon to the program
        // that started lnd. The channel should be buffered to avoid lnd being
        // blocked on sending to the channel.
        MacChan chan []byte
}

// ListenerCfg is a wrapper around custom listeners that can be passed to lnd
// when calling its main method.
type ListenerCfg struct {
        // RPCListeners can be set to the listeners to use for the RPC server.
        // If empty a regular network listener will be created.
        RPCListeners []*ListenerWithSignal
}

var errStreamIsolationWithProxySkip = errors.New(
        "while stream isolation is enabled, the TOR proxy may not be skipped",
)

// Main is the true entry point for lnd. It accepts a fully populated and
// validated main configuration struct and an optional listener config struct.
// This function starts all main system components then blocks until a signal
// is received on the shutdownChan at which point everything is shut down again.
func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
        interceptor signal.Interceptor) error {

        defer func() {
                ltndLog.Info("Shutdown complete\n")
                err := cfg.LogWriter.Close()
                if err != nil {
                        ltndLog.Errorf("Could not close log rotator: %v", err)
                }
        }()

        mkErr := func(format string, args ...interface{}) error {
                ltndLog.Errorf("Shutting down because error in main "+
                        "method: "+format, args...)
                return fmt.Errorf(format, args...)
        }

        // Show version at startup.
        ltndLog.Infof("Version: %s commit=%s, build=%s, logging=%s, "+
                "debuglevel=%s", build.Version(), build.Commit,
                build.Deployment, build.LoggingType, cfg.DebugLevel)

        var network string
        switch {
        case cfg.Bitcoin.TestNet3:
                network = "testnet"

        case cfg.Bitcoin.MainNet:
                network = "mainnet"

        case cfg.Bitcoin.SimNet:
                network = "simnet"

        case cfg.Bitcoin.RegTest:
                network = "regtest"

        case cfg.Bitcoin.SigNet:
                network = "signet"
        }

        ltndLog.Infof("Active chain: %v (network=%v)",
                strings.Title(BitcoinChainName), network,
        )

        ctx := context.Background()
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()

        // Enable http profiling server if requested.
        if cfg.Profile != "" {
                // Create the http handler.
                pprofMux := http.NewServeMux()
                pprofMux.HandleFunc("/debug/pprof/", pprof.Index)
                pprofMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
                pprofMux.HandleFunc("/debug/pprof/profile", pprof.Profile)
                pprofMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
                pprofMux.HandleFunc("/debug/pprof/trace", pprof.Trace)

                if cfg.BlockingProfile != 0 {
                        runtime.SetBlockProfileRate(cfg.BlockingProfile)
                }
                if cfg.MutexProfile != 0 {
                        runtime.SetMutexProfileFraction(cfg.MutexProfile)
                }

                // Redirect all requests to the pprof handler, thus visiting
                // `127.0.0.1:6060` will be redirected to
                // `127.0.0.1:6060/debug/pprof`.
                pprofMux.Handle("/", http.RedirectHandler(
                        "/debug/pprof/", http.StatusSeeOther,
                ))

                ltndLog.Infof("Pprof listening on %v", cfg.Profile)

                // Create the pprof server.
                pprofServer := &http.Server{
                        Addr:              cfg.Profile,
                        Handler:           pprofMux,
                        ReadHeaderTimeout: cfg.HTTPHeaderTimeout,
                }

                // Shut the server down when lnd is shutting down.
                defer func() {
                        ltndLog.Info("Stopping pprof server...")
                        err := pprofServer.Shutdown(ctx)
                        if err != nil {
                                ltndLog.Errorf("Stop pprof server got err: %v",
                                        err)
                        }
                }()

                // Start the pprof server.
                go func() {
                        err := pprofServer.ListenAndServe()
                        if err != nil && !errors.Is(err, http.ErrServerClosed) {
                                ltndLog.Errorf("Serving pprof got err: %v", err)
                        }
                }()
        }

        // Write cpu profile if requested.
        if cfg.CPUProfile != "" {
                f, err := os.Create(cfg.CPUProfile)
                if err != nil {
                        return mkErr("unable to create CPU profile: %v", err)
                }
                _ = runtimePprof.StartCPUProfile(f)
                defer func() {
                        _ = f.Close()
                }()
                defer runtimePprof.StopCPUProfile()
        }

        // Run configuration dependent DB pre-initialization. Note that this
        // needs to be done early and once during the startup process, before
        // any DB access.
        if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
                return mkErr("error initializing DBs: %v", err)
        }

        tlsManagerCfg := &TLSManagerCfg{
                TLSCertPath:        cfg.TLSCertPath,
                TLSKeyPath:         cfg.TLSKeyPath,
                TLSEncryptKey:      cfg.TLSEncryptKey,
                TLSExtraIPs:        cfg.TLSExtraIPs,
                TLSExtraDomains:    cfg.TLSExtraDomains,
                TLSAutoRefresh:     cfg.TLSAutoRefresh,
                TLSDisableAutofill: cfg.TLSDisableAutofill,
                TLSCertDuration:    cfg.TLSCertDuration,

                LetsEncryptDir:    cfg.LetsEncryptDir,
                LetsEncryptDomain: cfg.LetsEncryptDomain,
                LetsEncryptListen: cfg.LetsEncryptListen,

                DisableRestTLS: cfg.DisableRestTLS,

                HTTPHeaderTimeout: cfg.HTTPHeaderTimeout,
        }
        tlsManager := NewTLSManager(tlsManagerCfg)
        serverOpts, restDialOpts, restListen, cleanUp,
                err := tlsManager.SetCertificateBeforeUnlock()
        if err != nil {
                return mkErr("error setting cert before unlock: %v", err)
        }
        if cleanUp != nil {
                defer cleanUp()
        }

        // If we have chosen to start with a dedicated listener for the
        // rpc server, we set it directly.
        grpcListeners := append([]*ListenerWithSignal{}, lisCfg.RPCListeners...)
        if len(grpcListeners) == 0 {
                // Otherwise we create listeners from the RPCListeners defined
                // in the config.
                for _, grpcEndpoint := range cfg.RPCListeners {
                        // Start a gRPC server listening for HTTP/2
                        // connections.
                        lis, err := lncfg.ListenOnAddress(grpcEndpoint)
                        if err != nil {
                                return mkErr("unable to listen on %s: %v",
                                        grpcEndpoint, err)
                        }
                        defer lis.Close()

                        grpcListeners = append(
                                grpcListeners, &ListenerWithSignal{
                                        Listener: lis,
                                        Ready:    make(chan struct{}),
                                },
                        )
                }
        }

        // Create a new RPC interceptor that we'll add to the GRPC server. This
        // will be used to log the API calls invoked on the GRPC server.
        interceptorChain := rpcperms.NewInterceptorChain(
                rpcsLog, cfg.NoMacaroons, cfg.RPCMiddleware.Mandatory,
        )
        if err := interceptorChain.Start(); err != nil {
                return mkErr("error starting interceptor chain: %v", err)
        }
        defer func() {
                err := interceptorChain.Stop()
                if err != nil {
                        ltndLog.Warnf("error stopping RPC interceptor "+
                                "chain: %v", err)
                }
        }()

        // Allow the user to overwrite some defaults of the gRPC library related
        // to connection keepalive (server side and client side pings).
        serverKeepalive := keepalive.ServerParameters{
                Time:    cfg.GRPC.ServerPingTime,
                Timeout: cfg.GRPC.ServerPingTimeout,
        }
        clientKeepalive := keepalive.EnforcementPolicy{
                MinTime:             cfg.GRPC.ClientPingMinWait,
                PermitWithoutStream: cfg.GRPC.ClientAllowPingWithoutStream,
        }

        rpcServerOpts := interceptorChain.CreateServerOpts()
        serverOpts = append(serverOpts, rpcServerOpts...)
        serverOpts = append(
                serverOpts, grpc.MaxRecvMsgSize(lnrpc.MaxGrpcMsgSize),
                grpc.KeepaliveParams(serverKeepalive),
                grpc.KeepaliveEnforcementPolicy(clientKeepalive),
        )

        grpcServer := grpc.NewServer(serverOpts...)
        defer grpcServer.Stop()

        // We'll also register the RPC interceptor chain as the StateServer, as
        // it can be used to query for the current state of the wallet.
        lnrpc.RegisterStateServer(grpcServer, interceptorChain)

        // Initialize, and register our implementation of the gRPC interface
        // exported by the rpcServer.
        rpcServer := newRPCServer(cfg, interceptorChain, implCfg, interceptor)
        err = rpcServer.RegisterWithGrpcServer(grpcServer)
        if err != nil {
                return mkErr("error registering gRPC server: %v", err)
        }

        // Now that both the WalletUnlocker and LightningService have been
        // registered with the GRPC server, we can start listening.
        err = startGrpcListen(cfg, grpcServer, grpcListeners)
        if err != nil {
                return mkErr("error starting gRPC listener: %v", err)
        }

        // Now start the REST proxy for our gRPC server above. We'll ensure
        // we direct LND to connect to its loopback address rather than a
        // wildcard to prevent certificate issues when accessing the proxy
        // externally.
        stopProxy, err := startRestProxy(
                cfg, rpcServer, restDialOpts, restListen,
        )
        if err != nil {
                return mkErr("error starting REST proxy: %v", err)
        }
        defer stopProxy()

        // Start leader election if we're running on etcd. Continuation will be
        // blocked until this instance is elected as the current leader or
        // shutting down.
        elected := false
        var leaderElector cluster.LeaderElector
        if cfg.Cluster.EnableLeaderElection {
                electionCtx, cancelElection := context.WithCancel(ctx)

                go func() {
                        <-interceptor.ShutdownChannel()
                        cancelElection()
                }()

                ltndLog.Infof("Using %v leader elector",
                        cfg.Cluster.LeaderElector)

                leaderElector, err = cfg.Cluster.MakeLeaderElector(
                        electionCtx, cfg.DB,
                )
                if err != nil {
                        return err
                }

                defer func() {
                        if !elected {
                                return
                        }

                        ltndLog.Infof("Attempting to resign from leader role "+
                                "(%v)", cfg.Cluster.ID)

                        // Ensure that we don't block the shutdown process if
                        // the leader resigning process takes too long. The
                        // cluster will ensure that the leader is not elected
                        // again until the previous leader has resigned or the
                        // leader election timeout has passed.
                        timeoutCtx, cancel := context.WithTimeout(
                                ctx, leaderResignTimeout,
                        )
                        defer cancel()

                        if err := leaderElector.Resign(timeoutCtx); err != nil {
                                ltndLog.Errorf("Leader elector failed to "+
                                        "resign: %v", err)
                        }
                }()

                ltndLog.Infof("Starting leadership campaign (%v)",
                        cfg.Cluster.ID)

                if err := leaderElector.Campaign(electionCtx); err != nil {
                        return mkErr("leadership campaign failed: %v", err)
                }

                elected = true
                ltndLog.Infof("Elected as leader (%v)", cfg.Cluster.ID)
        }

        dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
        switch {
        case err == channeldb.ErrDryRunMigrationOK:
                ltndLog.Infof("%v, exiting", err)
                return nil
        case err != nil:
                return mkErr("unable to open databases: %v", err)
        }

        defer cleanUp()

        partialChainControl, walletConfig, cleanUp, err := implCfg.BuildWalletConfig(
                ctx, dbs, &implCfg.AuxComponents, interceptorChain,
                grpcListeners,
        )
        if err != nil {
                return mkErr("error creating wallet config: %v", err)
        }

        defer cleanUp()

        activeChainControl, cleanUp, err := implCfg.BuildChainControl(
                partialChainControl, walletConfig,
        )
        if err != nil {
                return mkErr("error loading chain control: %v", err)
        }

        defer cleanUp()

        // TODO(roasbeef): add rotation
        idKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
                keychain.KeyLocator{
                        Family: keychain.KeyFamilyNodeKey,
                        Index:  0,
                },
        )
        if err != nil {
                return mkErr("error deriving node key: %v", err)
        }

        if cfg.Tor.StreamIsolation && cfg.Tor.SkipProxyForClearNetTargets {
                return errStreamIsolationWithProxySkip
        }

        if cfg.Tor.Active {
                if cfg.Tor.SkipProxyForClearNetTargets {
                        srvrLog.Info("Onion services are accessible via Tor! " +
                                "NOTE: Traffic to clearnet services is not " +
                                "routed via Tor.")
                } else {
                        srvrLog.Infof("Proxying all network traffic via Tor "+
                                "(stream_isolation=%v)! NOTE: Ensure the "+
                                "backend node is proxying over Tor as well",
                                cfg.Tor.StreamIsolation)
                }
        }

        // If tor is active and either v2 or v3 onion services have been
        // specified, make a tor controller and pass it into both the watchtower
        // server and the regular lnd server.
        var torController *tor.Controller
        if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) {
                torController = tor.NewController(
                        cfg.Tor.Control, cfg.Tor.TargetIPAddress,
                        cfg.Tor.Password,
                )

                // Start the tor controller before giving it to any other
                // subsystems.
                if err := torController.Start(); err != nil {
                        return mkErr("unable to initialize tor controller: %v",
                                err)
                }
                defer func() {
                        if err := torController.Stop(); err != nil {
                                ltndLog.Errorf("error stopping tor "+
                                        "controller: %v", err)
                        }
                }()
        }

        var tower *watchtower.Standalone
        if cfg.Watchtower.Active {
                towerKeyDesc, err := activeChainControl.KeyRing.DeriveKey(
                        keychain.KeyLocator{
                                Family: keychain.KeyFamilyTowerID,
                                Index:  0,
                        },
                )
                if err != nil {
                        return mkErr("error deriving tower key: %v", err)
                }

                wtCfg := &watchtower.Config{
                        BlockFetcher:   activeChainControl.ChainIO,
                        DB:             dbs.TowerServerDB,
                        EpochRegistrar: activeChainControl.ChainNotifier,
                        Net:            cfg.net,
                        NewAddress: func() (btcutil.Address, error) {
                                return activeChainControl.Wallet.NewAddress(
                                        lnwallet.TaprootPubkey, false,
                                        lnwallet.DefaultAccountName,
                                )
                        },
                        NodeKeyECDH: keychain.NewPubKeyECDH(
                                towerKeyDesc, activeChainControl.KeyRing,
                        ),
                        PublishTx: activeChainControl.Wallet.PublishTransaction,
                        ChainHash: *cfg.ActiveNetParams.GenesisHash,
                }

                // If there is a tor controller (user wants auto hidden
                // services), then store a pointer in the watchtower config.
                if torController != nil {
                        wtCfg.TorController = torController
                        wtCfg.WatchtowerKeyPath = cfg.Tor.WatchtowerKeyPath
                        wtCfg.EncryptKey = cfg.Tor.EncryptKey
                        wtCfg.KeyRing = activeChainControl.KeyRing

                        switch {
                        case cfg.Tor.V2:
                                wtCfg.Type = tor.V2
                        case cfg.Tor.V3:
                                wtCfg.Type = tor.V3
                        }
                }

                wtConfig, err := cfg.Watchtower.Apply(
                        wtCfg, lncfg.NormalizeAddresses,
                )
                if err != nil {
                        return mkErr("unable to configure watchtower: %v", err)
                }

                tower, err = watchtower.New(wtConfig)
                if err != nil {
                        return mkErr("unable to create watchtower: %v", err)
                }
        }

        // Initialize the MultiplexAcceptor. If lnd was started with the
        // zero-conf feature bit, then this will be a ZeroConfAcceptor.
        // Otherwise, this will be a ChainedAcceptor.
        var multiAcceptor chanacceptor.MultiplexAcceptor
        if cfg.ProtocolOptions.ZeroConf() {
                multiAcceptor = chanacceptor.NewZeroConfAcceptor()
        } else {
                multiAcceptor = chanacceptor.NewChainedAcceptor()
        }

        // Set up the core server which will listen for incoming peer
        // connections.
        server, err := newServer(
                cfg, cfg.Listeners, dbs, activeChainControl, &idKeyDesc,
                activeChainControl.Cfg.WalletUnlockParams.ChansToRestore,
                multiAcceptor, torController, tlsManager, leaderElector,
                implCfg,
        )
        if err != nil {
                return mkErr("unable to create server: %v", err)
        }

        // Set up an autopilot manager from the current config. This will be
        // used to manage the underlying autopilot agent, starting and stopping
        // it at will.
        atplCfg, err := initAutoPilot(
                server, cfg.Autopilot, activeChainControl.MinHtlcIn,
                cfg.ActiveNetParams,
        )
        if err != nil {
                return mkErr("unable to initialize autopilot: %v", err)
        }

        atplManager, err := autopilot.NewManager(atplCfg)
        if err != nil {
                return mkErr("unable to create autopilot manager: %v", err)
        }
        if err := atplManager.Start(); err != nil {
                return mkErr("unable to start autopilot manager: %v", err)
        }
        defer atplManager.Stop()

        err = tlsManager.LoadPermanentCertificate(activeChainControl.KeyRing)
        if err != nil {
                return mkErr("unable to load permanent TLS certificate: %v",
                        err)
        }

        // Now we have created all dependencies necessary to populate and
        // start the RPC server.
        err = rpcServer.addDeps(
                server, interceptorChain.MacaroonService(), cfg.SubRPCServers,
                atplManager, server.invoices, tower, multiAcceptor,
                server.invoiceHtlcModifier,
        )
        if err != nil {
                return mkErr("unable to add deps to RPC server: %v", err)
        }
        if err := rpcServer.Start(); err != nil {
                return mkErr("unable to start RPC server: %v", err)
        }
        defer rpcServer.Stop()

        // We transition the RPC state to Active, as the RPC server is up.
        interceptorChain.SetRPCActive()

        if err := interceptor.Notifier.NotifyReady(true); err != nil {
                return mkErr("error notifying ready: %v", err)
        }

        // We'll wait until we're fully synced to continue the start up of the
        // remainder of the daemon. This ensures that we don't accept any
        // possibly invalid state transitions, or accept channels with spent
        // funds.
        _, bestHeight, err := activeChainControl.ChainIO.GetBestBlock()
        if err != nil {
                return mkErr("unable to determine chain tip: %v", err)
        }

        ltndLog.Infof("Waiting for chain backend to finish sync, "+
                "start_height=%v", bestHeight)

        type syncResult struct {
                synced        bool
                bestBlockTime int64
                err           error
        }

        var syncedResChan = make(chan syncResult, 1)

        for {
                // We check if the wallet is synced in a separate goroutine as
                // the call is blocking, and we want to be able to interrupt it
                // if the daemon is shutting down.
                go func() {
                        synced, bestBlockTime, err := activeChainControl.Wallet.
                                IsSynced()
                        syncedResChan <- syncResult{synced, bestBlockTime, err}
                }()

                select {
                case <-interceptor.ShutdownChannel():
                        return nil

                case res := <-syncedResChan:
                        if res.err != nil {
                                return mkErr("unable to determine if wallet "+
                                        "is synced: %v", res.err)
                        }

                        ltndLog.Debugf("Syncing to block timestamp: %v, is "+
                                "synced=%v", time.Unix(res.bestBlockTime, 0),
                                res.synced)

                        if res.synced {
                                break
                        }

                        // If we're not yet synced, we'll wait for a second
                        // before checking again.
                        select {
                        case <-interceptor.ShutdownChannel():
                                return nil

                        case <-time.After(time.Second):
                                continue
                        }
                }

                break
        }

        _, bestHeight, err = activeChainControl.ChainIO.GetBestBlock()
        if err != nil {
                return mkErr("unable to determine chain tip: %v", err)
        }

        ltndLog.Infof("Chain backend is fully synced (end_height=%v)!",
                bestHeight)

        // With all the relevant chains initialized, we can finally start the
        // server itself. We start the server in an asynchronous goroutine so
        // that we are able to interrupt and shutdown the daemon gracefully in
        // case the startup of the subservers do not behave as expected.
        errChan := make(chan error)
        go func() {
                errChan <- server.Start()
        }()

        defer func() {
                err := server.Stop()
                if err != nil {
                        ltndLog.Warnf("Stopping the server including all "+
                                "its subsystems failed with %v", err)
                }
        }()

        select {
        case err := <-errChan:
                if err == nil {
                        break
                }

                return mkErr("unable to start server: %v", err)

        case <-interceptor.ShutdownChannel():
                return nil
        }

        // We transition the server state to Active, as the server is up.
        interceptorChain.SetServerActive()

        // Now that the server has started, if the autopilot mode is currently
        // active, then we'll start the autopilot agent immediately. It will be
        // stopped together with the autopilot service.
        if cfg.Autopilot.Active {
                if err := atplManager.StartAgent(); err != nil {
                        return mkErr("unable to start autopilot agent: %v", err)
                }
        }

        if cfg.Watchtower.Active {
                if err := tower.Start(); err != nil {
                        return mkErr("unable to start watchtower: %v", err)
                }
                defer tower.Stop()
        }

        // Wait for shutdown signal from either a graceful server stop or from
        // the interrupt handler.
        <-interceptor.ShutdownChannel()
        return nil
}

// bakeMacaroon creates a new macaroon with newest version and the given
// permissions then returns it binary serialized.
func bakeMacaroon(ctx context.Context, svc *macaroons.Service,
        permissions []bakery.Op) ([]byte, error) {

        mac, err := svc.NewMacaroon(
                ctx, macaroons.DefaultRootKeyID, permissions...,
        )
        if err != nil {
                return nil, err
        }

        return mac.M().MarshalBinary()
}

// saveMacaroon bakes a macaroon with the specified macaroon permissions and
// writes it to a file with the given filename and file permissions.
func saveMacaroon(ctx context.Context, svc *macaroons.Service, filename string,
        macaroonPermissions []bakery.Op, filePermissions os.FileMode) error {

        macaroonBytes, err := bakeMacaroon(ctx, svc, macaroonPermissions)
        if err != nil {
                return err
        }
        err = os.WriteFile(filename, macaroonBytes, filePermissions)
        if err != nil {
                _ = os.Remove(filename)
                return err
        }

        return nil
}

// genDefaultMacaroons checks for three default macaroon files and generates
// them if they do not exist; one admin-level, one for invoice access and one
// read-only. Each macaroon is checked and created independently to ensure all
// three exist. The admin macaroon can also be used to generate more granular
// macaroons.
func genDefaultMacaroons(ctx context.Context, svc *macaroons.Service,
        admFile, roFile, invoiceFile string) error {

        // First, we'll generate a macaroon that only allows the caller to
        // access invoice related calls. This is useful for merchants and other
        // services to allow an isolated instance that can only query and
        // modify invoices.
        if !lnrpc.FileExists(invoiceFile) {
                err := saveMacaroon(
                        ctx, svc, invoiceFile, invoicePermissions, 0644,
                )
                if err != nil {
                        return err
                }
        }

        // Generate the read-only macaroon and write it to a file.
        if !lnrpc.FileExists(roFile) {
                err := saveMacaroon(
                        ctx, svc, roFile, readPermissions, 0644,
                )
                if err != nil {
                        return err
                }
        }

        // Generate the admin macaroon and write it to a file.
        if !lnrpc.FileExists(admFile) {
                err := saveMacaroon(
                        ctx, svc, admFile, adminPermissions(),
                        adminMacaroonFilePermissions,
                )
                if err != nil {
                        return err
                }
        }

        return nil
}

// adminPermissions returns a list of all permissions in a safe way that doesn't
// modify any of the source lists.
func adminPermissions() []bakery.Op {
        admin := make([]bakery.Op, len(readPermissions)+len(writePermissions))
        copy(admin[:len(readPermissions)], readPermissions)
        copy(admin[len(readPermissions):], writePermissions)
        return admin
}

// createWalletUnlockerService creates a WalletUnlockerService from the passed
// config.
func createWalletUnlockerService(cfg *Config) *walletunlocker.UnlockerService {
        // The macaroonFiles are passed to the wallet unlocker so they can be
        // deleted and recreated in case the root macaroon key is also changed
        // during the change password operation.
        macaroonFiles := []string{
                cfg.AdminMacPath, cfg.ReadMacPath, cfg.InvoiceMacPath,
        }

        return walletunlocker.New(
                cfg.ActiveNetParams.Params, macaroonFiles,
                cfg.ResetWalletTransactions, nil,
        )
}

// startGrpcListen starts the GRPC server on the passed listeners.
func startGrpcListen(cfg *Config, grpcServer *grpc.Server,
        listeners []*ListenerWithSignal) error {

        // Use a WaitGroup so we can be sure the instructions on how to input the
        // password is the last thing to be printed to the console.
        var wg sync.WaitGroup

        for _, lis := range listeners {
                wg.Add(1)
                go func(lis *ListenerWithSignal) {
                        rpcsLog.Infof("RPC server listening on %s", lis.Addr())

                        // Close the ready chan to indicate we are listening.
                        close(lis.Ready)

                        wg.Done()
                        _ = grpcServer.Serve(lis)
                }(lis)
        }

        // If Prometheus monitoring is enabled, start the Prometheus exporter.
        if cfg.Prometheus.Enabled() {
                err := monitoring.ExportPrometheusMetrics(
                        grpcServer, cfg.Prometheus,
                )
                if err != nil {
                        return err
                }
        }

        // Wait for gRPC servers to be up running.
        wg.Wait()

        return nil
}

// startRestProxy starts the given REST proxy on the listeners found in the
// config.
func startRestProxy(cfg *Config, rpcServer *rpcServer, restDialOpts []grpc.DialOption,
        restListen func(net.Addr) (net.Listener, error)) (func(), error) {

        // We use the first RPC listener as the destination for our REST proxy.
        // If the listener is set to listen on all interfaces, we replace it
        // with localhost, as we cannot dial it directly.
        restProxyDest := cfg.RPCListeners[0].String()
        switch {
        case strings.Contains(restProxyDest, "0.0.0.0"):
                restProxyDest = strings.Replace(
                        restProxyDest, "0.0.0.0", "127.0.0.1", 1,
                )

        case strings.Contains(restProxyDest, "[::]"):
                restProxyDest = strings.Replace(
                        restProxyDest, "[::]", "[::1]", 1,
                )
        }

        var shutdownFuncs []func()
        shutdown := func() {
                for _, shutdownFn := range shutdownFuncs {
                        shutdownFn()
                }
        }

        // Start a REST proxy for our gRPC server.
        ctx := context.Background()
        ctx, cancel := context.WithCancel(ctx)
        shutdownFuncs = append(shutdownFuncs, cancel)

        // We'll set up a proxy that will forward REST calls to the GRPC
        // server.
        //
        // The default JSON marshaler of the REST proxy only sets OrigName to
        // true, which instructs it to use the same field names as specified in
        // the proto file and not switch to camel case. What we also want is
        // that the marshaler prints all values, even if they are falsey.
        customMarshalerOption := proxy.WithMarshalerOption(
                proxy.MIMEWildcard, &proxy.JSONPb{
                        MarshalOptions:   *lnrpc.RESTJsonMarshalOpts,
                        UnmarshalOptions: *lnrpc.RESTJsonUnmarshalOpts,
                },
        )
        mux := proxy.NewServeMux(
                customMarshalerOption,

                // Don't allow falling back to other HTTP methods, we want exact
                // matches only. The actual method to be used can be overwritten
                // by setting X-HTTP-Method-Override so there should be no
                // reason for not specifying the correct method in the first
                // place.
                proxy.WithDisablePathLengthFallback(),
        )

        // Register our services with the REST proxy.
        err := rpcServer.RegisterWithRestProxy(
                ctx, mux, restDialOpts, restProxyDest,
        )
        if err != nil {
                return nil, err
        }

        // Wrap the default grpc-gateway handler with the WebSocket handler.
        restHandler := lnrpc.NewWebSocketProxy(
                mux, rpcsLog, cfg.WSPingInterval, cfg.WSPongWait,
                lnrpc.LndClientStreamingURIs,
        )

        // Use a WaitGroup so we can be sure the instructions on how to input the
        // password is the last thing to be printed to the console.
        var wg sync.WaitGroup

        // Now spin up a network listener for each requested port and start a
        // goroutine that serves REST with the created mux there.
        for _, restEndpoint := range cfg.RESTListeners {
                lis, err := restListen(restEndpoint)
                if err != nil {
                        ltndLog.Errorf("gRPC proxy unable to listen on %s",
                                restEndpoint)
                        return nil, err
                }

                shutdownFuncs = append(shutdownFuncs, func() {
                        err := lis.Close()
                        if err != nil {
                                rpcsLog.Errorf("Error closing listener: %v",
                                        err)
                        }
                })

                wg.Add(1)
                go func() {
                        rpcsLog.Infof("gRPC proxy started at %s", lis.Addr())

                        // Create our proxy chain now. A request will pass
                        // through the following chain:
                        // req ---> CORS handler --> WS proxy --->
                        //   REST proxy --> gRPC endpoint
                        corsHandler := allowCORS(restHandler, cfg.RestCORS)

                        wg.Done()
                        err := http.Serve(lis, corsHandler)
                        if err != nil && !lnrpc.IsClosedConnError(err) {
                                rpcsLog.Error(err)
                        }
                }()
        }

        // Wait for REST servers to be up running.
        wg.Wait()

        return shutdown, nil
}
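
For orientation, the sketch below shows roughly how a command-line entry point can drive Main from this file: install a signal interceptor, load and validate the configuration, then hand both to Main together with an empty ListenerCfg. The LoadConfig and ImplementationConfig helpers are assumed to exist as in lnd's cmd package; treat this as an illustrative sketch rather than the canonical entry point.

package main

import (
        "fmt"
        "os"

        "github.com/lightningnetwork/lnd"
        "github.com/lightningnetwork/lnd/signal"
)

func main() {
        // Hook an interceptor for OS signals so Main can shut down gracefully.
        shutdownInterceptor, err := signal.Intercept()
        if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
        }

        // Load and validate the configuration (assumed helper, as in cmd/lnd).
        loadedConfig, err := lnd.LoadConfig(shutdownInterceptor)
        if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
        }
        implCfg := loadedConfig.ImplementationConfig(shutdownInterceptor)

        // Run the daemon; Main blocks until a shutdown signal is received.
        if err := lnd.Main(
                loadedConfig, lnd.ListenerCfg{}, implCfg, shutdownInterceptor,
        ); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
        }
}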