
lightningnetwork / lnd / 13315131746

13 Feb 2025 07:03PM UTC coverage: 49.357% (-0.007%) from 49.364%
Build 13315131746 · push · github · web-flow
Merge pull request #9507 from saubyk/18.5-releasenotes-patch

Update release-notes-0.18.5.md

100764 of 204155 relevant lines covered (49.36%)

1.54 hits per line

Source File

70.03% · /rpcserver.go
1
package lnd
2

3
import (
4
        "bytes"
5
        "context"
6
        "encoding/hex"
7
        "errors"
8
        "fmt"
9
        "io"
10
        "math"
11
        "net"
12
        "net/http"
13
        "os"
14
        "path/filepath"
15
        "runtime"
16
        "sort"
17
        "strconv"
18
        "strings"
19
        "sync"
20
        "sync/atomic"
21
        "time"
22

23
        "github.com/btcsuite/btcd/blockchain"
24
        "github.com/btcsuite/btcd/btcec/v2"
25
        "github.com/btcsuite/btcd/btcec/v2/ecdsa"
26
        "github.com/btcsuite/btcd/btcutil"
27
        "github.com/btcsuite/btcd/btcutil/psbt"
28
        "github.com/btcsuite/btcd/chaincfg"
29
        "github.com/btcsuite/btcd/chaincfg/chainhash"
30
        "github.com/btcsuite/btcd/txscript"
31
        "github.com/btcsuite/btcd/wire"
32
        "github.com/btcsuite/btcwallet/waddrmgr"
33
        "github.com/btcsuite/btcwallet/wallet"
34
        "github.com/btcsuite/btcwallet/wallet/txauthor"
35
        "github.com/davecgh/go-spew/spew"
36
        proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
37
        "github.com/lightningnetwork/lnd/autopilot"
38
        "github.com/lightningnetwork/lnd/build"
39
        "github.com/lightningnetwork/lnd/chainreg"
40
        "github.com/lightningnetwork/lnd/chanacceptor"
41
        "github.com/lightningnetwork/lnd/chanbackup"
42
        "github.com/lightningnetwork/lnd/chanfitness"
43
        "github.com/lightningnetwork/lnd/channeldb"
44
        "github.com/lightningnetwork/lnd/channelnotifier"
45
        "github.com/lightningnetwork/lnd/clock"
46
        "github.com/lightningnetwork/lnd/contractcourt"
47
        "github.com/lightningnetwork/lnd/discovery"
48
        "github.com/lightningnetwork/lnd/feature"
49
        "github.com/lightningnetwork/lnd/fn/v2"
50
        "github.com/lightningnetwork/lnd/funding"
51
        "github.com/lightningnetwork/lnd/graph"
52
        graphdb "github.com/lightningnetwork/lnd/graph/db"
53
        "github.com/lightningnetwork/lnd/graph/db/models"
54
        "github.com/lightningnetwork/lnd/graph/graphsession"
55
        "github.com/lightningnetwork/lnd/htlcswitch"
56
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
57
        "github.com/lightningnetwork/lnd/input"
58
        "github.com/lightningnetwork/lnd/invoices"
59
        "github.com/lightningnetwork/lnd/keychain"
60
        "github.com/lightningnetwork/lnd/kvdb"
61
        "github.com/lightningnetwork/lnd/labels"
62
        "github.com/lightningnetwork/lnd/lncfg"
63
        "github.com/lightningnetwork/lnd/lnrpc"
64
        "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
65
        "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
66
        "github.com/lightningnetwork/lnd/lnrpc/walletrpc"
67
        "github.com/lightningnetwork/lnd/lntypes"
68
        "github.com/lightningnetwork/lnd/lnutils"
69
        "github.com/lightningnetwork/lnd/lnwallet"
70
        "github.com/lightningnetwork/lnd/lnwallet/btcwallet"
71
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
72
        "github.com/lightningnetwork/lnd/lnwallet/chancloser"
73
        "github.com/lightningnetwork/lnd/lnwallet/chanfunding"
74
        "github.com/lightningnetwork/lnd/lnwire"
75
        "github.com/lightningnetwork/lnd/macaroons"
76
        "github.com/lightningnetwork/lnd/peer"
77
        "github.com/lightningnetwork/lnd/peernotifier"
78
        "github.com/lightningnetwork/lnd/record"
79
        "github.com/lightningnetwork/lnd/routing"
80
        "github.com/lightningnetwork/lnd/routing/blindedpath"
81
        "github.com/lightningnetwork/lnd/routing/route"
82
        "github.com/lightningnetwork/lnd/rpcperms"
83
        "github.com/lightningnetwork/lnd/signal"
84
        "github.com/lightningnetwork/lnd/sweep"
85
        "github.com/lightningnetwork/lnd/tlv"
86
        "github.com/lightningnetwork/lnd/watchtower"
87
        "github.com/lightningnetwork/lnd/zpay32"
88
        "github.com/tv42/zbase32"
89
        "google.golang.org/grpc"
90
        "google.golang.org/grpc/codes"
91
        "google.golang.org/grpc/status"
92
        "google.golang.org/protobuf/proto"
93
        "gopkg.in/macaroon-bakery.v2/bakery"
94
)
95

96
const (
97
        // defaultNumBlocksEstimate is the number of blocks that we fall back
98
        // to issuing an estimate for if a fee preference doesn't specify an
99
        // explicit conf target or fee rate.
100
        defaultNumBlocksEstimate = 6
101
)
102

103
var (
104
        // readPermissions is a slice of all entities that allow read
105
        // permissions for authorization purposes, all lowercase.
106
        readPermissions = []bakery.Op{
107
                {
108
                        Entity: "onchain",
109
                        Action: "read",
110
                },
111
                {
112
                        Entity: "offchain",
113
                        Action: "read",
114
                },
115
                {
116
                        Entity: "address",
117
                        Action: "read",
118
                },
119
                {
120
                        Entity: "message",
121
                        Action: "read",
122
                },
123
                {
124
                        Entity: "peers",
125
                        Action: "read",
126
                },
127
                {
128
                        Entity: "info",
129
                        Action: "read",
130
                },
131
                {
132
                        Entity: "invoices",
133
                        Action: "read",
134
                },
135
                {
136
                        Entity: "signer",
137
                        Action: "read",
138
                },
139
                {
140
                        Entity: "macaroon",
141
                        Action: "read",
142
                },
143
        }
144

145
        // writePermissions is a slice of all entities that allow write
146
        // permissions for authorization purposes, all lowercase.
147
        writePermissions = []bakery.Op{
148
                {
149
                        Entity: "onchain",
150
                        Action: "write",
151
                },
152
                {
153
                        Entity: "offchain",
154
                        Action: "write",
155
                },
156
                {
157
                        Entity: "address",
158
                        Action: "write",
159
                },
160
                {
161
                        Entity: "message",
162
                        Action: "write",
163
                },
164
                {
165
                        Entity: "peers",
166
                        Action: "write",
167
                },
168
                {
169
                        Entity: "info",
170
                        Action: "write",
171
                },
172
                {
173
                        Entity: "invoices",
174
                        Action: "write",
175
                },
176
                {
177
                        Entity: "signer",
178
                        Action: "generate",
179
                },
180
                {
181
                        Entity: "macaroon",
182
                        Action: "generate",
183
                },
184
                {
185
                        Entity: "macaroon",
186
                        Action: "write",
187
                },
188
        }
189

190
        // invoicePermissions is a slice of all the entities that allow a user
191
        // to only access calls that are related to invoices, so: streaming
192
        // RPCs, generating, and listing invoices.
193
        invoicePermissions = []bakery.Op{
194
                {
195
                        Entity: "invoices",
196
                        Action: "read",
197
                },
198
                {
199
                        Entity: "invoices",
200
                        Action: "write",
201
                },
202
                {
203
                        Entity: "address",
204
                        Action: "read",
205
                },
206
                {
207
                        Entity: "address",
208
                        Action: "write",
209
                },
210
                {
211
                        Entity: "onchain",
212
                        Action: "read",
213
                },
214
        }
215

216
        // TODO(guggero): Refactor into constants that are used for all
217
        // permissions in this file. Also expose the list of possible
218
        // permissions in an RPC when per RPC permissions are
219
        // implemented.
220
        validActions  = []string{"read", "write", "generate"}
221
        validEntities = []string{
222
                "onchain", "offchain", "address", "message",
223
                "peers", "info", "invoices", "signer", "macaroon",
224
                macaroons.PermissionEntityCustomURI,
225
        }
226

227
        // If the --no-macaroons flag is used to start lnd, the macaroon service
228
        // is not initialized. errMacaroonDisabled is then returned when
229
        // macaroon related services are used.
230
        errMacaroonDisabled = fmt.Errorf("macaroon authentication disabled, " +
231
                "remove --no-macaroons flag to enable")
232
)
233
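
The entity/action pairs above are plain bakery.Op values, so validating a caller-supplied permission against these allow-lists reduces to simple membership tests. A minimal sketch follows; checkCustomPermission is a hypothetical helper (not part of lnd) that reuses the stringInSlice helper defined just below:

// checkCustomPermission is a hypothetical helper illustrating how a custom
// entity/action pair could be checked against the allow-lists above before
// being baked into a macaroon.
func checkCustomPermission(op bakery.Op) error {
        if !stringInSlice(op.Entity, validEntities) {
                return fmt.Errorf("invalid permission entity: %s", op.Entity)
        }
        if !stringInSlice(op.Action, validActions) {
                return fmt.Errorf("invalid permission action: %s", op.Action)
        }
        return nil
}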

234
// stringInSlice returns true if a string is contained in the given slice.
235
func stringInSlice(a string, slice []string) bool {
3✔
236
        for _, b := range slice {
6✔
237
                if b == a {
6✔
238
                        return true
3✔
239
                }
3✔
240
        }
241
        return false
3✔
242
}
243

244
// GetAllPermissions returns all the permissions required to interact with lnd.
245
func GetAllPermissions() []bakery.Op {
×
246
        allPerms := make([]bakery.Op, 0)
×
247

×
248
        // The map will help keep track of which specific permission pairs have
×
249
        // already been added to the slice.
×
250
        allPermsMap := make(map[string]map[string]struct{})
×
251

×
252
        for _, perms := range MainRPCServerPermissions() {
×
253
                for _, perm := range perms {
×
254
                        entity := perm.Entity
×
255
                        action := perm.Action
×
256

×
257
                        // If this specific entity-action permission pair isn't
×
258
                        // in the map yet. Add it to map, and the permission
×
259
                        // slice.
×
260
                        if acts, ok := allPermsMap[entity]; ok {
×
261
                                if _, ok := acts[action]; !ok {
×
262
                                        allPermsMap[entity][action] = struct{}{}
×
263

×
264
                                        allPerms = append(
×
265
                                                allPerms, perm,
×
266
                                        )
×
267
                                }
×
268
                        } else {
×
269
                                allPermsMap[entity] = make(map[string]struct{})
×
270
                                allPermsMap[entity][action] = struct{}{}
×
271
                                allPerms = append(allPerms, perm)
×
272
                        }
×
273
                }
274
        }
275

276
        return allPerms
×
277
}
278

279
// MainRPCServerPermissions returns a mapping of the main RPC server calls to
280
// the permissions they require.
281
func MainRPCServerPermissions() map[string][]bakery.Op {
3✔
282
        return map[string][]bakery.Op{
3✔
283
                "/lnrpc.Lightning/SendCoins": {{
3✔
284
                        Entity: "onchain",
3✔
285
                        Action: "write",
3✔
286
                }},
3✔
287
                "/lnrpc.Lightning/ListUnspent": {{
3✔
288
                        Entity: "onchain",
3✔
289
                        Action: "read",
3✔
290
                }},
3✔
291
                "/lnrpc.Lightning/SendMany": {{
3✔
292
                        Entity: "onchain",
3✔
293
                        Action: "write",
3✔
294
                }},
3✔
295
                "/lnrpc.Lightning/NewAddress": {{
3✔
296
                        Entity: "address",
3✔
297
                        Action: "write",
3✔
298
                }},
3✔
299
                "/lnrpc.Lightning/SignMessage": {{
3✔
300
                        Entity: "message",
3✔
301
                        Action: "write",
3✔
302
                }},
3✔
303
                "/lnrpc.Lightning/VerifyMessage": {{
3✔
304
                        Entity: "message",
3✔
305
                        Action: "read",
3✔
306
                }},
3✔
307
                "/lnrpc.Lightning/ConnectPeer": {{
3✔
308
                        Entity: "peers",
3✔
309
                        Action: "write",
3✔
310
                }},
3✔
311
                "/lnrpc.Lightning/DisconnectPeer": {{
3✔
312
                        Entity: "peers",
3✔
313
                        Action: "write",
3✔
314
                }},
3✔
315
                "/lnrpc.Lightning/OpenChannel": {{
3✔
316
                        Entity: "onchain",
3✔
317
                        Action: "write",
3✔
318
                }, {
3✔
319
                        Entity: "offchain",
3✔
320
                        Action: "write",
3✔
321
                }},
3✔
322
                "/lnrpc.Lightning/BatchOpenChannel": {{
3✔
323
                        Entity: "onchain",
3✔
324
                        Action: "write",
3✔
325
                }, {
3✔
326
                        Entity: "offchain",
3✔
327
                        Action: "write",
3✔
328
                }},
3✔
329
                "/lnrpc.Lightning/OpenChannelSync": {{
3✔
330
                        Entity: "onchain",
3✔
331
                        Action: "write",
3✔
332
                }, {
3✔
333
                        Entity: "offchain",
3✔
334
                        Action: "write",
3✔
335
                }},
3✔
336
                "/lnrpc.Lightning/CloseChannel": {{
3✔
337
                        Entity: "onchain",
3✔
338
                        Action: "write",
3✔
339
                }, {
3✔
340
                        Entity: "offchain",
3✔
341
                        Action: "write",
3✔
342
                }},
3✔
343
                "/lnrpc.Lightning/AbandonChannel": {{
3✔
344
                        Entity: "offchain",
3✔
345
                        Action: "write",
3✔
346
                }},
3✔
347
                "/lnrpc.Lightning/GetInfo": {{
3✔
348
                        Entity: "info",
3✔
349
                        Action: "read",
3✔
350
                }},
3✔
351
                "/lnrpc.Lightning/GetDebugInfo": {{
3✔
352
                        Entity: "info",
3✔
353
                        Action: "read",
3✔
354
                }, {
3✔
355
                        Entity: "offchain",
3✔
356
                        Action: "read",
3✔
357
                }, {
3✔
358
                        Entity: "onchain",
3✔
359
                        Action: "read",
3✔
360
                }, {
3✔
361
                        Entity: "peers",
3✔
362
                        Action: "read",
3✔
363
                }},
3✔
364
                "/lnrpc.Lightning/GetRecoveryInfo": {{
3✔
365
                        Entity: "info",
3✔
366
                        Action: "read",
3✔
367
                }},
3✔
368
                "/lnrpc.Lightning/ListPeers": {{
3✔
369
                        Entity: "peers",
3✔
370
                        Action: "read",
3✔
371
                }},
3✔
372
                "/lnrpc.Lightning/WalletBalance": {{
3✔
373
                        Entity: "onchain",
3✔
374
                        Action: "read",
3✔
375
                }},
3✔
376
                "/lnrpc.Lightning/EstimateFee": {{
3✔
377
                        Entity: "onchain",
3✔
378
                        Action: "read",
3✔
379
                }},
3✔
380
                "/lnrpc.Lightning/ChannelBalance": {{
3✔
381
                        Entity: "offchain",
3✔
382
                        Action: "read",
3✔
383
                }},
3✔
384
                "/lnrpc.Lightning/PendingChannels": {{
3✔
385
                        Entity: "offchain",
3✔
386
                        Action: "read",
3✔
387
                }},
3✔
388
                "/lnrpc.Lightning/ListChannels": {{
3✔
389
                        Entity: "offchain",
3✔
390
                        Action: "read",
3✔
391
                }},
3✔
392
                "/lnrpc.Lightning/SubscribeChannelEvents": {{
3✔
393
                        Entity: "offchain",
3✔
394
                        Action: "read",
3✔
395
                }},
3✔
396
                "/lnrpc.Lightning/ClosedChannels": {{
3✔
397
                        Entity: "offchain",
3✔
398
                        Action: "read",
3✔
399
                }},
3✔
400
                "/lnrpc.Lightning/SendPayment": {{
3✔
401
                        Entity: "offchain",
3✔
402
                        Action: "write",
3✔
403
                }},
3✔
404
                "/lnrpc.Lightning/SendPaymentSync": {{
3✔
405
                        Entity: "offchain",
3✔
406
                        Action: "write",
3✔
407
                }},
3✔
408
                "/lnrpc.Lightning/SendToRoute": {{
3✔
409
                        Entity: "offchain",
3✔
410
                        Action: "write",
3✔
411
                }},
3✔
412
                "/lnrpc.Lightning/SendToRouteSync": {{
3✔
413
                        Entity: "offchain",
3✔
414
                        Action: "write",
3✔
415
                }},
3✔
416
                "/lnrpc.Lightning/AddInvoice": {{
3✔
417
                        Entity: "invoices",
3✔
418
                        Action: "write",
3✔
419
                }},
3✔
420
                "/lnrpc.Lightning/LookupInvoice": {{
3✔
421
                        Entity: "invoices",
3✔
422
                        Action: "read",
3✔
423
                }},
3✔
424
                "/lnrpc.Lightning/ListInvoices": {{
3✔
425
                        Entity: "invoices",
3✔
426
                        Action: "read",
3✔
427
                }},
3✔
428
                "/lnrpc.Lightning/SubscribeInvoices": {{
3✔
429
                        Entity: "invoices",
3✔
430
                        Action: "read",
3✔
431
                }},
3✔
432
                "/lnrpc.Lightning/SubscribeTransactions": {{
3✔
433
                        Entity: "onchain",
3✔
434
                        Action: "read",
3✔
435
                }},
3✔
436
                "/lnrpc.Lightning/GetTransactions": {{
3✔
437
                        Entity: "onchain",
3✔
438
                        Action: "read",
3✔
439
                }},
3✔
440
                "/lnrpc.Lightning/DescribeGraph": {{
3✔
441
                        Entity: "info",
3✔
442
                        Action: "read",
3✔
443
                }},
3✔
444
                "/lnrpc.Lightning/GetNodeMetrics": {{
3✔
445
                        Entity: "info",
3✔
446
                        Action: "read",
3✔
447
                }},
3✔
448
                "/lnrpc.Lightning/GetChanInfo": {{
3✔
449
                        Entity: "info",
3✔
450
                        Action: "read",
3✔
451
                }},
3✔
452
                "/lnrpc.Lightning/GetNodeInfo": {{
3✔
453
                        Entity: "info",
3✔
454
                        Action: "read",
3✔
455
                }},
3✔
456
                "/lnrpc.Lightning/QueryRoutes": {{
3✔
457
                        Entity: "info",
3✔
458
                        Action: "read",
3✔
459
                }},
3✔
460
                "/lnrpc.Lightning/GetNetworkInfo": {{
3✔
461
                        Entity: "info",
3✔
462
                        Action: "read",
3✔
463
                }},
3✔
464
                "/lnrpc.Lightning/StopDaemon": {{
3✔
465
                        Entity: "info",
3✔
466
                        Action: "write",
3✔
467
                }},
3✔
468
                "/lnrpc.Lightning/SubscribeChannelGraph": {{
3✔
469
                        Entity: "info",
3✔
470
                        Action: "read",
3✔
471
                }},
3✔
472
                "/lnrpc.Lightning/ListPayments": {{
3✔
473
                        Entity: "offchain",
3✔
474
                        Action: "read",
3✔
475
                }},
3✔
476
                "/lnrpc.Lightning/DeletePayment": {{
3✔
477
                        Entity: "offchain",
3✔
478
                        Action: "write",
3✔
479
                }},
3✔
480
                "/lnrpc.Lightning/DeleteAllPayments": {{
3✔
481
                        Entity: "offchain",
3✔
482
                        Action: "write",
3✔
483
                }},
3✔
484
                "/lnrpc.Lightning/DebugLevel": {{
3✔
485
                        Entity: "info",
3✔
486
                        Action: "write",
3✔
487
                }},
3✔
488
                "/lnrpc.Lightning/DecodePayReq": {{
3✔
489
                        Entity: "offchain",
3✔
490
                        Action: "read",
3✔
491
                }},
3✔
492
                "/lnrpc.Lightning/FeeReport": {{
3✔
493
                        Entity: "offchain",
3✔
494
                        Action: "read",
3✔
495
                }},
3✔
496
                "/lnrpc.Lightning/UpdateChannelPolicy": {{
3✔
497
                        Entity: "offchain",
3✔
498
                        Action: "write",
3✔
499
                }},
3✔
500
                "/lnrpc.Lightning/ForwardingHistory": {{
3✔
501
                        Entity: "offchain",
3✔
502
                        Action: "read",
3✔
503
                }},
3✔
504
                "/lnrpc.Lightning/RestoreChannelBackups": {{
3✔
505
                        Entity: "offchain",
3✔
506
                        Action: "write",
3✔
507
                }},
3✔
508
                "/lnrpc.Lightning/ExportChannelBackup": {{
3✔
509
                        Entity: "offchain",
3✔
510
                        Action: "read",
3✔
511
                }},
3✔
512
                "/lnrpc.Lightning/VerifyChanBackup": {{
3✔
513
                        Entity: "offchain",
3✔
514
                        Action: "read",
3✔
515
                }},
3✔
516
                "/lnrpc.Lightning/ExportAllChannelBackups": {{
3✔
517
                        Entity: "offchain",
3✔
518
                        Action: "read",
3✔
519
                }},
3✔
520
                "/lnrpc.Lightning/SubscribeChannelBackups": {{
3✔
521
                        Entity: "offchain",
3✔
522
                        Action: "read",
3✔
523
                }},
3✔
524
                "/lnrpc.Lightning/ChannelAcceptor": {{
3✔
525
                        Entity: "onchain",
3✔
526
                        Action: "write",
3✔
527
                }, {
3✔
528
                        Entity: "offchain",
3✔
529
                        Action: "write",
3✔
530
                }},
3✔
531
                "/lnrpc.Lightning/BakeMacaroon": {{
3✔
532
                        Entity: "macaroon",
3✔
533
                        Action: "generate",
3✔
534
                }},
3✔
535
                "/lnrpc.Lightning/ListMacaroonIDs": {{
3✔
536
                        Entity: "macaroon",
3✔
537
                        Action: "read",
3✔
538
                }},
3✔
539
                "/lnrpc.Lightning/DeleteMacaroonID": {{
3✔
540
                        Entity: "macaroon",
3✔
541
                        Action: "write",
3✔
542
                }},
3✔
543
                "/lnrpc.Lightning/ListPermissions": {{
3✔
544
                        Entity: "info",
3✔
545
                        Action: "read",
3✔
546
                }},
3✔
547
                "/lnrpc.Lightning/CheckMacaroonPermissions": {{
3✔
548
                        Entity: "macaroon",
3✔
549
                        Action: "read",
3✔
550
                }},
3✔
551
                "/lnrpc.Lightning/SubscribePeerEvents": {{
3✔
552
                        Entity: "peers",
3✔
553
                        Action: "read",
3✔
554
                }},
3✔
555
                "/lnrpc.Lightning/FundingStateStep": {{
3✔
556
                        Entity: "onchain",
3✔
557
                        Action: "write",
3✔
558
                }, {
3✔
559
                        Entity: "offchain",
3✔
560
                        Action: "write",
3✔
561
                }},
3✔
562
                lnrpc.RegisterRPCMiddlewareURI: {{
3✔
563
                        Entity: "macaroon",
3✔
564
                        Action: "write",
3✔
565
                }},
3✔
566
                "/lnrpc.Lightning/SendCustomMessage": {{
3✔
567
                        Entity: "offchain",
3✔
568
                        Action: "write",
3✔
569
                }},
3✔
570
                "/lnrpc.Lightning/SubscribeCustomMessages": {{
3✔
571
                        Entity: "offchain",
3✔
572
                        Action: "read",
3✔
573
                }},
3✔
574
                "/lnrpc.Lightning/LookupHtlcResolution": {{
3✔
575
                        Entity: "offchain",
3✔
576
                        Action: "read",
3✔
577
                }},
3✔
578
                "/lnrpc.Lightning/ListAliases": {{
3✔
579
                        Entity: "offchain",
3✔
580
                        Action: "read",
3✔
581
                }},
3✔
582
        }
3✔
583
}
3✔
584
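
Because MainRPCServerPermissions keys its map by the full gRPC URI, tooling can look up exactly which macaroon permissions a given call requires. A hedged sketch; requiredPermissions is a hypothetical helper, not part of lnd:

// requiredPermissions is a hypothetical helper that returns the bakery ops a
// macaroon must carry before the given URI may be invoked. For example,
// "/lnrpc.Lightning/GetInfo" maps to {Entity: "info", Action: "read"} in the
// table above.
func requiredPermissions(uri string) ([]bakery.Op, bool) {
        ops, ok := MainRPCServerPermissions()[uri]
        return ops, ok
}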

585
// AuxDataParser is an interface that is used to parse auxiliary custom data
586
// within RPC messages. This is used to transform binary blobs to human-readable
587
// JSON representations.
588
type AuxDataParser interface {
589
        // InlineParseCustomData replaces any custom data binary blob in the
590
        // given RPC message with its corresponding JSON formatted data. This
591
        // transforms the binary (likely TLV encoded) data to a human-readable
592
        // JSON representation (still as byte slice).
593
        InlineParseCustomData(msg proto.Message) error
594
}
595
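
Implementations of AuxDataParser only need to satisfy the single InlineParseCustomData method. A minimal sketch of a no-op parser; noopAuxDataParser is hypothetical and only illustrates the interface shape:

// noopAuxDataParser is a hypothetical AuxDataParser that leaves RPC messages
// untouched, roughly what happens when no external parser is configured.
type noopAuxDataParser struct{}

// InlineParseCustomData implements AuxDataParser without modifying msg.
func (noopAuxDataParser) InlineParseCustomData(msg proto.Message) error {
        return nil
}

// Compile-time check, mirroring the pattern used for rpcServer below.
var _ AuxDataParser = (*noopAuxDataParser)(nil)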

596
// rpcServer is a gRPC, RPC front end to the lnd daemon.
597
// TODO(roasbeef): pagination support for the list-style calls
598
type rpcServer struct {
599
        started  int32 // To be used atomically.
600
        shutdown int32 // To be used atomically.
601

602
        // Required by the grpc-gateway/v2 library for forward compatibility.
603
        // Must be after the atomically used variables to not break struct
604
        // alignment.
605
        lnrpc.UnimplementedLightningServer
606

607
        server *server
608

609
        cfg *Config
610

611
        // subServers are a set of sub-RPC servers that use the same gRPC and
612
        // listening sockets as the main RPC server, but which maintain their
613
        // own independent service. This allows us to expose a set of
614
        // micro-service like abstractions to the outside world for users to
615
        // consume.
616
        subServers      []lnrpc.SubServer
617
        subGrpcHandlers []lnrpc.GrpcHandler
618

619
        // routerBackend contains the backend implementation of the router
620
        // rpc sub server.
621
        routerBackend *routerrpc.RouterBackend
622

623
        // chanPredicate is used in the bidirectional ChannelAcceptor streaming
624
        // method.
625
        chanPredicate chanacceptor.MultiplexAcceptor
626

627
        quit chan struct{}
628

629
        // macService is the macaroon service that we need to mint new
630
        // macaroons.
631
        macService *macaroons.Service
632

633
        // selfNode is our own pubkey.
634
        selfNode route.Vertex
635

636
        // interceptorChain is the interceptor added to our gRPC server.
637
        interceptorChain *rpcperms.InterceptorChain
638

639
        // implCfg is the configuration for some of the interfaces that can be
640
        // provided externally.
641
        implCfg *ImplementationCfg
642

643
        // interceptor is used to be able to request a shutdown
644
        interceptor signal.Interceptor
645

646
        graphCache        sync.RWMutex
647
        describeGraphResp *lnrpc.ChannelGraph
648
        graphCacheEvictor *time.Timer
649
}
650

651
// A compile time check to ensure that rpcServer fully implements the
652
// LightningServer gRPC service.
653
var _ lnrpc.LightningServer = (*rpcServer)(nil)
654

655
// newRPCServer creates and returns a new instance of the rpcServer. Before
656
// dependencies are added, this will be a non-functioning RPC server only to
657
// be used to register the LightningService with the gRPC server.
658
func newRPCServer(cfg *Config, interceptorChain *rpcperms.InterceptorChain,
659
        implCfg *ImplementationCfg, interceptor signal.Interceptor) *rpcServer {
3✔
660

3✔
661
        // We go through the list of registered sub-servers, and create a gRPC
3✔
662
        // handler for each. These are used to register with the gRPC server
3✔
663
        // before all dependencies are available.
3✔
664
        registeredSubServers := lnrpc.RegisteredSubServers()
3✔
665

3✔
666
        var subServerHandlers []lnrpc.GrpcHandler
3✔
667
        for _, subServer := range registeredSubServers {
6✔
668
                subServerHandlers = append(
3✔
669
                        subServerHandlers, subServer.NewGrpcHandler(),
3✔
670
                )
3✔
671
        }
3✔
672

673
        return &rpcServer{
3✔
674
                cfg:              cfg,
3✔
675
                subGrpcHandlers:  subServerHandlers,
3✔
676
                interceptorChain: interceptorChain,
3✔
677
                implCfg:          implCfg,
3✔
678
                quit:             make(chan struct{}, 1),
3✔
679
                interceptor:      interceptor,
3✔
680
        }
3✔
681
}
682

683
// addDeps populates all dependencies needed by the RPC server, and any
684
// of the sub-servers that it maintains. When this is done, the RPC server can
685
// be started, and start accepting RPC calls.
686
func (r *rpcServer) addDeps(s *server, macService *macaroons.Service,
687
        subServerCgs *subRPCServerConfigs, atpl *autopilot.Manager,
688
        invoiceRegistry *invoices.InvoiceRegistry, tower *watchtower.Standalone,
689
        chanPredicate chanacceptor.MultiplexAcceptor,
690
        invoiceHtlcModifier *invoices.HtlcModificationInterceptor) error {
3✔
691

3✔
692
        // Set up router rpc backend.
3✔
693
        selfNode, err := s.graphDB.SourceNode()
3✔
694
        if err != nil {
3✔
695
                return err
×
696
        }
×
697
        graph := s.graphDB
3✔
698

3✔
699
        routerBackend := &routerrpc.RouterBackend{
3✔
700
                SelfNode: selfNode.PubKeyBytes,
3✔
701
                FetchChannelCapacity: func(chanID uint64) (btcutil.Amount,
3✔
702
                        error) {
6✔
703

3✔
704
                        info, _, _, err := graph.FetchChannelEdgesByID(chanID)
3✔
705
                        if err != nil {
6✔
706
                                return 0, err
3✔
707
                        }
3✔
708
                        return info.Capacity, nil
3✔
709
                },
710
                FetchAmountPairCapacity: func(nodeFrom, nodeTo route.Vertex,
711
                        amount lnwire.MilliSatoshi) (btcutil.Amount, error) {
3✔
712

3✔
713
                        return routing.FetchAmountPairCapacity(
3✔
714
                                graphsession.NewRoutingGraph(graph),
3✔
715
                                selfNode.PubKeyBytes, nodeFrom, nodeTo, amount,
3✔
716
                        )
3✔
717
                },
3✔
718
                FetchChannelEndpoints: func(chanID uint64) (route.Vertex,
719
                        route.Vertex, error) {
×
720

×
721
                        info, _, _, err := graph.FetchChannelEdgesByID(
×
722
                                chanID,
×
723
                        )
×
724
                        if err != nil {
×
725
                                return route.Vertex{}, route.Vertex{},
×
726
                                        fmt.Errorf("unable to fetch channel "+
×
727
                                                "edges by channel ID %d: %v",
×
728
                                                chanID, err)
×
729
                        }
×
730

731
                        return info.NodeKey1Bytes, info.NodeKey2Bytes, nil
×
732
                },
733
                FindRoute:              s.chanRouter.FindRoute,
734
                MissionControl:         s.defaultMC,
735
                ActiveNetParams:        r.cfg.ActiveNetParams.Params,
736
                Tower:                  s.controlTower,
737
                MaxTotalTimelock:       r.cfg.MaxOutgoingCltvExpiry,
738
                DefaultFinalCltvDelta:  uint16(r.cfg.Bitcoin.TimeLockDelta),
739
                SubscribeHtlcEvents:    s.htlcNotifier.SubscribeHtlcEvents,
740
                InterceptableForwarder: s.interceptableSwitch,
741
                SetChannelEnabled: func(outpoint wire.OutPoint) error {
3✔
742
                        return s.chanStatusMgr.RequestEnable(outpoint, true)
3✔
743
                },
3✔
744
                SetChannelDisabled: func(outpoint wire.OutPoint) error {
3✔
745
                        return s.chanStatusMgr.RequestDisable(outpoint, true)
3✔
746
                },
3✔
747
                SetChannelAuto:     s.chanStatusMgr.RequestAuto,
748
                UseStatusInitiated: subServerCgs.RouterRPC.UseStatusInitiated,
749
                ParseCustomChannelData: func(msg proto.Message) error {
3✔
750
                        err = fn.MapOptionZ(
3✔
751
                                r.server.implCfg.AuxDataParser,
3✔
752
                                func(parser AuxDataParser) error {
3✔
753
                                        return parser.InlineParseCustomData(msg)
×
754
                                },
×
755
                        )
756
                        if err != nil {
3✔
757
                                return fmt.Errorf("error parsing custom data: "+
×
758
                                        "%w", err)
×
759
                        }
×
760

761
                        return nil
3✔
762
                },
763
                ShouldSetExpEndorsement: func() bool {
3✔
764
                        if s.cfg.ProtocolOptions.NoExperimentalEndorsement() {
3✔
765
                                return false
×
766
                        }
×
767

768
                        return clock.NewDefaultClock().Now().Before(
3✔
769
                                EndorsementExperimentEnd,
3✔
770
                        )
3✔
771
                },
772
        }
773

774
        genInvoiceFeatures := func() *lnwire.FeatureVector {
6✔
775
                return s.featureMgr.Get(feature.SetInvoice)
3✔
776
        }
3✔
777
        genAmpInvoiceFeatures := func() *lnwire.FeatureVector {
3✔
778
                return s.featureMgr.Get(feature.SetInvoiceAmp)
×
779
        }
×
780

781
        parseAddr := func(addr string) (net.Addr, error) {
6✔
782
                return parseAddr(addr, r.cfg.net)
3✔
783
        }
3✔
784

785
        var (
3✔
786
                subServers     []lnrpc.SubServer
3✔
787
                subServerPerms []lnrpc.MacaroonPerms
3✔
788
        )
3✔
789

3✔
790
        // Before we create any of the sub-servers, we need to ensure that all
3✔
791
        // the dependencies they need are properly populated within each sub
3✔
792
        // server configuration struct.
3✔
793
        //
3✔
794
        // TODO(roasbeef): extend sub-server config to have both (local vs remote) DB
3✔
795
        err = subServerCgs.PopulateDependencies(
3✔
796
                r.cfg, s.cc, r.cfg.networkDir, macService, atpl, invoiceRegistry,
3✔
797
                s.htlcSwitch, r.cfg.ActiveNetParams.Params, s.chanRouter,
3✔
798
                routerBackend, s.nodeSigner, s.graphDB, s.chanStateDB,
3✔
799
                s.sweeper, tower, s.towerClientMgr, r.cfg.net.ResolveTCPAddr,
3✔
800
                genInvoiceFeatures, genAmpInvoiceFeatures,
3✔
801
                s.getNodeAnnouncement, s.updateAndBroadcastSelfNode, parseAddr,
3✔
802
                rpcsLog, s.aliasMgr, r.implCfg.AuxDataParser,
3✔
803
                invoiceHtlcModifier,
3✔
804
        )
3✔
805
        if err != nil {
3✔
806
                return err
×
807
        }
×
808

809
        // Now that the sub-servers have all their dependencies in place, we
810
        // can create each sub-server!
811
        for _, subServerInstance := range r.subGrpcHandlers {
6✔
812
                subServer, macPerms, err := subServerInstance.CreateSubServer(
3✔
813
                        subServerCgs,
3✔
814
                )
3✔
815
                if err != nil {
3✔
816
                        return err
×
817
                }
×
818

819
                // We'll collect the sub-server, and also the set of
820
                // permissions it needs for macaroons so we can apply the
821
                // interceptors below.
822
                subServers = append(subServers, subServer)
3✔
823
                subServerPerms = append(subServerPerms, macPerms)
3✔
824
        }
825

826
        // Next, we need to merge the set of sub server macaroon permissions
827
        // with the main RPC server permissions so we can unite them under a
828
        // single set of interceptors.
829
        for m, ops := range MainRPCServerPermissions() {
6✔
830
                err := r.interceptorChain.AddPermission(m, ops)
3✔
831
                if err != nil {
3✔
832
                        return err
×
833
                }
×
834
        }
835

836
        for _, subServerPerm := range subServerPerms {
6✔
837
                for method, ops := range subServerPerm {
6✔
838
                        err := r.interceptorChain.AddPermission(method, ops)
3✔
839
                        if err != nil {
3✔
840
                                return err
×
841
                        }
×
842
                }
843
        }
844

845
        // External subservers possibly need to register their own permissions
846
        // and macaroon validator.
847
        for method, ops := range r.implCfg.ExternalValidator.Permissions() {
3✔
848
                err := r.interceptorChain.AddPermission(method, ops)
×
849
                if err != nil {
×
850
                        return err
×
851
                }
×
852

853
                // Give the external subservers the possibility to also use
854
                // their own validator to check any macaroons attached to calls
855
                // to this method. This allows them to have their own root key
856
                // ID database and permission entities.
857
                err = macService.RegisterExternalValidator(
×
858
                        method, r.implCfg.ExternalValidator,
×
859
                )
×
860
                if err != nil {
×
861
                        return fmt.Errorf("could not register external "+
×
862
                                "macaroon validator: %v", err)
×
863
                }
×
864
        }
865

866
        // Finally, with all the set up complete, add the last dependencies to
867
        // the rpc server.
868
        r.server = s
3✔
869
        r.subServers = subServers
3✔
870
        r.routerBackend = routerBackend
3✔
871
        r.chanPredicate = chanPredicate
3✔
872
        r.macService = macService
3✔
873
        r.selfNode = selfNode.PubKeyBytes
3✔
874

3✔
875
        graphCacheDuration := r.cfg.Caches.RPCGraphCacheDuration
3✔
876
        if graphCacheDuration != 0 {
6✔
877
                r.graphCacheEvictor = time.AfterFunc(graphCacheDuration, func() {
6✔
878
                        // Grab the mutex and purge the current populated
3✔
879
                        // describe graph response.
3✔
880
                        r.graphCache.Lock()
3✔
881
                        defer r.graphCache.Unlock()
3✔
882

3✔
883
                        r.describeGraphResp = nil
3✔
884

3✔
885
                        // Reset ourselves as well at the end so we run again
3✔
886
                        // after the duration.
3✔
887
                        r.graphCacheEvictor.Reset(graphCacheDuration)
3✔
888
                })
3✔
889
        }
890

891
        return nil
3✔
892
}
893

894
// RegisterWithGrpcServer registers the rpcServer and any subservers with the
895
// root gRPC server.
896
func (r *rpcServer) RegisterWithGrpcServer(grpcServer *grpc.Server) error {
3✔
897
        // Register the main RPC server.
3✔
898
        lnrpc.RegisterLightningServer(grpcServer, r)
3✔
899

3✔
900
        // Now the main RPC server has been registered, we'll iterate through
3✔
901
        // all the sub-RPC servers and register them to ensure that requests
3✔
902
        // are properly routed towards them.
3✔
903
        for _, subServer := range r.subGrpcHandlers {
6✔
904
                err := subServer.RegisterWithRootServer(grpcServer)
3✔
905
                if err != nil {
3✔
906
                        return fmt.Errorf("unable to register "+
×
907
                                "sub-server with root: %v", err)
×
908
                }
×
909
        }
910

911
        // Before actually listening on the gRPC listener, give external
912
        // subservers the chance to register to our gRPC server. Those external
913
        // subservers (think GrUB) are responsible for starting/stopping on
914
        // their own, we just let them register their services to the same
915
        // server instance so all of them can be exposed on the same
916
        // port/listener.
917
        err := r.implCfg.RegisterGrpcSubserver(grpcServer)
3✔
918
        if err != nil {
3✔
919
                rpcsLog.Errorf("error registering external gRPC "+
×
920
                        "subserver: %v", err)
×
921
        }
×
922

923
        return nil
3✔
924
}
925

926
// Start launches any helper goroutines required for the rpcServer to function.
927
func (r *rpcServer) Start() error {
3✔
928
        if atomic.AddInt32(&r.started, 1) != 1 {
3✔
929
                return nil
×
930
        }
×
931

932
        // First, we'll start all the sub-servers to ensure that they're ready
933
        // to take new requests in.
934
        //
935
        // TODO(roasbeef): some may require that the entire daemon be started
936
        // at that point
937
        for _, subServer := range r.subServers {
6✔
938
                rpcsLog.Debugf("Starting sub RPC server: %v", subServer.Name())
3✔
939

3✔
940
                if err := subServer.Start(); err != nil {
3✔
941
                        return err
×
942
                }
×
943
        }
944

945
        return nil
3✔
946
}
947

948
// RegisterWithRestProxy registers the RPC server and any subservers with the
949
// given REST proxy.
950
func (r *rpcServer) RegisterWithRestProxy(restCtx context.Context,
951
        restMux *proxy.ServeMux, restDialOpts []grpc.DialOption,
952
        restProxyDest string) error {
3✔
953

3✔
954
        // With our custom REST proxy mux created, register our main RPC and
3✔
955
        // give all subservers a chance to register as well.
3✔
956
        err := lnrpc.RegisterLightningHandlerFromEndpoint(
3✔
957
                restCtx, restMux, restProxyDest, restDialOpts,
3✔
958
        )
3✔
959
        if err != nil {
3✔
960
                return err
×
961
        }
×
962

963
        // Register our State service with the REST proxy.
964
        err = lnrpc.RegisterStateHandlerFromEndpoint(
3✔
965
                restCtx, restMux, restProxyDest, restDialOpts,
3✔
966
        )
3✔
967
        if err != nil {
3✔
968
                return err
×
969
        }
×
970

971
        // Register all the subservers with the REST proxy.
972
        for _, subServer := range r.subGrpcHandlers {
6✔
973
                err := subServer.RegisterWithRestServer(
3✔
974
                        restCtx, restMux, restProxyDest, restDialOpts,
3✔
975
                )
3✔
976
                if err != nil {
3✔
977
                        return fmt.Errorf("unable to register REST sub-server "+
×
978
                                "with root: %v", err)
×
979
                }
×
980
        }
981

982
        // Before listening on any of the interfaces, we also want to give the
983
        // external subservers a chance to register their own REST proxy stub
984
        // with our mux instance.
985
        err = r.implCfg.RegisterRestSubserver(
3✔
986
                restCtx, restMux, restProxyDest, restDialOpts,
3✔
987
        )
3✔
988
        if err != nil {
3✔
989
                rpcsLog.Errorf("error registering external REST subserver: %v",
×
990
                        err)
×
991
        }
×
992
        return nil
3✔
993
}
994

995
// Stop signals any active goroutines for a graceful closure.
996
func (r *rpcServer) Stop() error {
3✔
997
        if atomic.AddInt32(&r.shutdown, 1) != 1 {
3✔
998
                return nil
×
999
        }
×
1000

1001
        rpcsLog.Infof("Stopping RPC Server")
3✔
1002

3✔
1003
        close(r.quit)
3✔
1004

3✔
1005
        // After we've signalled all of our active goroutines to exit, we'll
3✔
1006
        // then do the same to signal a graceful shutdown of all the sub
3✔
1007
        // servers.
3✔
1008
        for _, subServer := range r.subServers {
6✔
1009
                rpcsLog.Infof("Stopping %v Sub-RPC Server",
3✔
1010
                        subServer.Name())
3✔
1011

3✔
1012
                if err := subServer.Stop(); err != nil {
3✔
1013
                        rpcsLog.Errorf("unable to stop sub-server %v: %v",
×
1014
                                subServer.Name(), err)
×
1015
                        continue
×
1016
                }
1017
        }
1018

1019
        return nil
3✔
1020
}
1021

1022
// addrPairsToOutputs converts a map describing a set of outputs to be created,
1023
// to the outputs themselves. The passed map pairs an address with a desired
1024
// output value amount. Each address is converted to its corresponding pkScript
1025
// to be used within the constructed output(s).
1026
func addrPairsToOutputs(addrPairs map[string]int64,
1027
        params *chaincfg.Params) ([]*wire.TxOut, error) {
3✔
1028

3✔
1029
        outputs := make([]*wire.TxOut, 0, len(addrPairs))
3✔
1030
        for addr, amt := range addrPairs {
6✔
1031
                addr, err := btcutil.DecodeAddress(addr, params)
3✔
1032
                if err != nil {
3✔
1033
                        return nil, err
×
1034
                }
×
1035

1036
                if !addr.IsForNet(params) {
3✔
1037
                        return nil, fmt.Errorf("address is not for %s",
×
1038
                                params.Name)
×
1039
                }
×
1040

1041
                pkscript, err := txscript.PayToAddrScript(addr)
3✔
1042
                if err != nil {
3✔
1043
                        return nil, err
×
1044
                }
×
1045

1046
                outputs = append(outputs, wire.NewTxOut(amt, pkscript))
3✔
1047
        }
1048

1049
        return outputs, nil
3✔
1050
}
1051
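
As its doc comment describes, addrPairsToOutputs (defined above) simply decodes each address for the active network and pairs it with the requested amount in satoshis. A hedged usage sketch; exampleAddrPairsToOutputs and the amounts are illustrative only:

// exampleAddrPairsToOutputs is a hypothetical wrapper showing the intended
// call pattern: map each destination address to an amount in satoshis and let
// addrPairsToOutputs build the corresponding wire.TxOut slice for the given
// network parameters.
func exampleAddrPairsToOutputs(addrA, addrB string,
        params *chaincfg.Params) ([]*wire.TxOut, error) {

        pairs := map[string]int64{
                addrA: 100_000, // 0.001 BTC to the first address.
                addrB: 250_000, // 0.0025 BTC to the second address.
        }

        return addrPairsToOutputs(pairs, params)
}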

1052
// allowCORS wraps the given http.Handler with a function that adds the
1053
// Access-Control-Allow-Origin header to the response.
1054
func allowCORS(handler http.Handler, origins []string) http.Handler {
3✔
1055
        allowHeaders := "Access-Control-Allow-Headers"
3✔
1056
        allowMethods := "Access-Control-Allow-Methods"
3✔
1057
        allowOrigin := "Access-Control-Allow-Origin"
3✔
1058

3✔
1059
        // If the user didn't supply any origins that means CORS is disabled
3✔
1060
        // and we should return the original handler.
3✔
1061
        if len(origins) == 0 {
3✔
1062
                return handler
×
1063
        }
×
1064

1065
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
6✔
1066
                origin := r.Header.Get("Origin")
3✔
1067

3✔
1068
                // Skip everything if the browser doesn't send the Origin field.
3✔
1069
                if origin == "" {
6✔
1070
                        handler.ServeHTTP(w, r)
3✔
1071
                        return
3✔
1072
                }
3✔
1073

1074
                // Set the static header fields first.
1075
                w.Header().Set(
3✔
1076
                        allowHeaders,
3✔
1077
                        "Content-Type, Accept, Grpc-Metadata-Macaroon",
3✔
1078
                )
3✔
1079
                w.Header().Set(allowMethods, "GET, POST, DELETE")
3✔
1080

3✔
1081
                // Either we allow all origins or the incoming request matches
3✔
1082
                // a specific origin in our list of allowed origins.
3✔
1083
                for _, allowedOrigin := range origins {
6✔
1084
                        if allowedOrigin == "*" || origin == allowedOrigin {
6✔
1085
                                // Only set allowed origin to requested origin.
3✔
1086
                                w.Header().Set(allowOrigin, origin)
3✔
1087

3✔
1088
                                break
3✔
1089
                        }
1090
                }
1091

1092
                // For a pre-flight request we only need to send the headers
1093
                // back. No need to call the rest of the chain.
1094
                if r.Method == "OPTIONS" {
6✔
1095
                        return
3✔
1096
                }
3✔
1097

1098
                // Everything's prepared now, we can pass the request along the
1099
                // chain of handlers.
1100
                handler.ServeHTTP(w, r)
×
1101
        })
1102
}
1103
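
allowCORS (above) is a plain http.Handler decorator, so wiring it in front of a mux is a one-liner. A hedged sketch; exampleServeWithCORS, the origin, and the listen address are illustrative, not lnd's actual REST bootstrap:

// exampleServeWithCORS is a hypothetical snippet showing how a handler can be
// wrapped so that only the listed origins receive CORS headers. Passing an
// empty origins slice disables CORS and returns the handler unchanged.
func exampleServeWithCORS(mux http.Handler) error {
        corsHandler := allowCORS(mux, []string{"https://example.com"})
        return http.ListenAndServe("localhost:8080", corsHandler)
}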

1104
// sendCoinsOnChain makes an on-chain transaction in order to send coins to one or
1105
// more addresses specified in the passed payment map. The payment map maps an
1106
// address to a specified output value to be sent to that address.
1107
func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64,
1108
        feeRate chainfee.SatPerKWeight, minConfs int32, label string,
1109
        strategy wallet.CoinSelectionStrategy,
1110
        selectedUtxos fn.Set[wire.OutPoint]) (*chainhash.Hash, error) {
3✔
1111

3✔
1112
        outputs, err := addrPairsToOutputs(paymentMap, r.cfg.ActiveNetParams.Params)
3✔
1113
        if err != nil {
3✔
1114
                return nil, err
×
1115
        }
×
1116

1117
        // We first do a dry run, to sanity check we won't spend our wallet
1118
        // balance below the reserved amount.
1119
        authoredTx, err := r.server.cc.Wallet.CreateSimpleTx(
3✔
1120
                selectedUtxos, outputs, feeRate, minConfs, strategy, true,
3✔
1121
        )
3✔
1122
        if err != nil {
6✔
1123
                return nil, err
3✔
1124
        }
3✔
1125

1126
        // Check the authored transaction and use the explicitly set change index
1127
        // to make sure that the wallet reserved balance is not invalidated.
1128
        _, err = r.server.cc.Wallet.CheckReservedValueTx(
3✔
1129
                lnwallet.CheckReservedValueTxReq{
3✔
1130
                        Tx:          authoredTx.Tx,
3✔
1131
                        ChangeIndex: &authoredTx.ChangeIndex,
3✔
1132
                },
3✔
1133
        )
3✔
1134
        if err != nil {
6✔
1135
                return nil, err
3✔
1136
        }
3✔
1137

1138
        // If that checks out, we're fairly confident that sending to
1139
        // these outputs will keep the wallet balance above the reserve.
1140
        tx, err := r.server.cc.Wallet.SendOutputs(
3✔
1141
                selectedUtxos, outputs, feeRate, minConfs, label, strategy,
3✔
1142
        )
3✔
1143
        if err != nil {
3✔
1144
                return nil, err
×
1145
        }
×
1146

1147
        txHash := tx.TxHash()
3✔
1148
        return &txHash, nil
3✔
1149
}
1150

1151
// ListUnspent returns useful information about each unspent output owned by
1152
// the wallet, as reported by the underlying `ListUnspentWitness`; the
1153
// information returned is: outpoint, amount in satoshis, address, address
1154
// type, scriptPubKey in hex and number of confirmations.  The result is
1155
// filtered to contain outputs whose number of confirmations is between a
1156
// minimum and maximum number of confirmations specified by the user, with
1157
// 0 meaning unconfirmed.
1158
func (r *rpcServer) ListUnspent(ctx context.Context,
1159
        in *lnrpc.ListUnspentRequest) (*lnrpc.ListUnspentResponse, error) {
×
1160

×
1161
        // Validate the confirmation arguments.
×
1162
        minConfs, maxConfs, err := lnrpc.ParseConfs(in.MinConfs, in.MaxConfs)
×
1163
        if err != nil {
×
1164
                return nil, err
×
1165
        }
×
1166

1167
        // With our arguments validated, we'll query the internal wallet for
1168
        // the set of UTXOs that match our query.
1169
        //
1170
        // We'll acquire the global coin selection lock to ensure there aren't
1171
        // any other concurrent processes attempting to lock any UTXOs which may
1172
        // be shown available to us.
1173
        var utxos []*lnwallet.Utxo
×
1174
        err = r.server.cc.Wallet.WithCoinSelectLock(func() error {
×
1175
                utxos, err = r.server.cc.Wallet.ListUnspentWitness(
×
1176
                        minConfs, maxConfs, in.Account,
×
1177
                )
×
1178
                return err
×
1179
        })
×
1180
        if err != nil {
×
1181
                return nil, err
×
1182
        }
×
1183

1184
        rpcUtxos, err := lnrpc.MarshalUtxos(utxos, r.cfg.ActiveNetParams.Params)
×
1185
        if err != nil {
×
1186
                return nil, err
×
1187
        }
×
1188

1189
        maxStr := ""
×
1190
        if maxConfs != math.MaxInt32 {
×
1191
                maxStr = " max=" + fmt.Sprintf("%d", maxConfs)
×
1192
        }
×
1193

1194
        rpcsLog.Debugf("[listunspent] min=%v%v, generated utxos: %v", minConfs,
×
1195
                maxStr, utxos)
×
1196

×
1197
        return &lnrpc.ListUnspentResponse{
×
1198
                Utxos: rpcUtxos,
×
1199
        }, nil
×
1200
}
1201
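
From a client's perspective, the handler above only needs the confirmation bounds (and optionally an account name); the response carries the marshalled UTXOs. A hedged client-side sketch; exampleListUnspent is hypothetical, connection setup is elided, and the field names follow the request and response fields referenced in the handler above:

// exampleListUnspent is a hypothetical gRPC client call against the handler
// above: it lists wallet UTXOs with at least one confirmation.
func exampleListUnspent(ctx context.Context,
        conn *grpc.ClientConn) ([]*lnrpc.Utxo, error) {

        client := lnrpc.NewLightningClient(conn)
        resp, err := client.ListUnspent(ctx, &lnrpc.ListUnspentRequest{
                MinConfs: 1,
                MaxConfs: math.MaxInt32,
        })
        if err != nil {
                return nil, err
        }

        return resp.Utxos, nil
}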

1202
// EstimateFee handles a request for estimating the fee for sending a
// transaction spending to multiple specified outputs in parallel.
func (r *rpcServer) EstimateFee(ctx context.Context,
        in *lnrpc.EstimateFeeRequest) (*lnrpc.EstimateFeeResponse, error) {

        // Create the list of outputs we are spending to.
        outputs, err := addrPairsToOutputs(in.AddrToAmount, r.cfg.ActiveNetParams.Params)
        if err != nil {
                return nil, err
        }

        // Query the fee estimator for the fee rate for the given confirmation
        // target.
        target := in.TargetConf
        feePref := sweep.FeeEstimateInfo{
                ConfTarget: uint32(target),
        }

        // Since we are providing a fee estimation as an RPC response, there's
        // no need to set a max feerate here, so we use 0.
        feePerKw, err := feePref.Estimate(r.server.cc.FeeEstimator, 0)
        if err != nil {
                return nil, err
        }

        // Then, we'll extract the minimum number of confirmations that each
        // output we use to fund the transaction should satisfy.
        minConfs, err := lnrpc.ExtractMinConfs(
                in.GetMinConfs(), in.GetSpendUnconfirmed(),
        )
        if err != nil {
                return nil, err
        }

        coinSelectionStrategy, err := lnrpc.UnmarshallCoinSelectionStrategy(
                in.CoinSelectionStrategy,
                r.server.cc.Wallet.Cfg.CoinSelectionStrategy,
        )
        if err != nil {
                return nil, err
        }

        // We will ask the wallet to create a tx using this fee rate. We set
        // dryRun=true to avoid inflating the change addresses in the db.
        var tx *txauthor.AuthoredTx
        wallet := r.server.cc.Wallet
        err = wallet.WithCoinSelectLock(func() error {
                tx, err = wallet.CreateSimpleTx(
                        nil, outputs, feePerKw, minConfs, coinSelectionStrategy,
                        true,
                )
                return err
        })
        if err != nil {
                return nil, err
        }

        // Use the created tx to calculate the total fee.
        totalOutput := int64(0)
        for _, out := range tx.Tx.TxOut {
                totalOutput += out.Value
        }
        totalFee := int64(tx.TotalInput) - totalOutput

        resp := &lnrpc.EstimateFeeResponse{
                FeeSat:      totalFee,
                SatPerVbyte: uint64(feePerKw.FeePerVByte()),

                // Deprecated field.
                FeerateSatPerByte: int64(feePerKw.FeePerVByte()),
        }

        rpcsLog.Debugf("[estimatefee] fee estimate for conf target %d: %v",
                target, resp)

        return resp, nil
}

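// Illustrative sketch (editor's addition, not part of the original file):
// estimating the fee for a two-output send via the RPC above, assuming a
// connected lnrpc.LightningClient named "client" and placeholder addresses:
//
//      resp, err := client.EstimateFee(ctx, &lnrpc.EstimateFeeRequest{
//              AddrToAmount: map[string]int64{
//                      "bcrt1qaddr1...": 100_000,
//                      "bcrt1qaddr2...": 250_000,
//              },
//              TargetConf: 6,
//      })
//      if err != nil {
//              log.Fatalf("estimatefee failed: %v", err)
//      }
//      fmt.Printf("fee=%d sat, feerate=%d sat/vB\n",
//              resp.FeeSat, resp.SatPerVbyte)
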
// maybeUseDefaultConf makes sure that when the user doesn't set either the fee
// rate or conf target, the default conf target is used.
func maybeUseDefaultConf(satPerByte int64, satPerVByte uint64,
        targetConf uint32) uint32 {

        // If the fee rate is set, there's no need to use the default conf
        // target. In this case, we just return the targetConf from the
        // request.
        if satPerByte != 0 || satPerVByte != 0 {
                return targetConf
        }

        // Return the user specified conf target if set.
        if targetConf != 0 {
                return targetConf
        }

        // Neither the fee rate nor the conf target is set, so the default of
        // 6 will be returned.
        rpcsLog.Errorf("Expected either 'sat_per_vbyte' or 'conf_target' to " +
                "be set, using default conf of 6 instead")

        return defaultNumBlocksEstimate
}

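// Editor's note (not part of the original file): the precedence implemented
// above, with defaultNumBlocksEstimate being the 6 named in the log message:
//
//      maybeUseDefaultConf(satPerByte, satPerVByte, targetConf)
//
//      satPerVByte=25, targetConf=0  -> 0  (explicit fee rate wins, conf target unused)
//      satPerVByte=0,  targetConf=12 -> 12 (user supplied conf target)
//      satPerVByte=0,  targetConf=0  -> 6  (default, with an error log)
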
// SendCoins executes a request to send coins to a particular address. Unlike
// SendMany, this RPC call only allows creating a single output at a time.
func (r *rpcServer) SendCoins(ctx context.Context,
        in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {

        // Keep the old behavior prior to 0.18.0 - when the user doesn't set
        // fee rate or conf target, the default conf target of 6 is used.
        targetConf := maybeUseDefaultConf(
                in.SatPerByte, in.SatPerVbyte, uint32(in.TargetConf),
        )

        // Calculate an appropriate fee rate for this transaction.
        feePerKw, err := lnrpc.CalculateFeeRate(
                uint64(in.SatPerByte), in.SatPerVbyte, // nolint:staticcheck
                targetConf, r.server.cc.FeeEstimator,
        )
        if err != nil {
                return nil, err
        }

        // Then, we'll extract the minimum number of confirmations that each
        // output we use to fund the transaction should satisfy.
        minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
        if err != nil {
                return nil, err
        }

        rpcsLog.Infof("[sendcoins] addr=%v, amt=%v, sat/kw=%v, min_confs=%v, "+
                "send_all=%v, select_outpoints=%v",
                in.Addr, btcutil.Amount(in.Amount), int64(feePerKw), minConfs,
                in.SendAll, len(in.Outpoints))

        // Decode the address receiving the coins; we need to check whether the
        // address is valid for this network.
        targetAddr, err := btcutil.DecodeAddress(
                in.Addr, r.cfg.ActiveNetParams.Params,
        )
        if err != nil {
                return nil, err
        }

        // Make the check on the decoded address according to the active network.
        if !targetAddr.IsForNet(r.cfg.ActiveNetParams.Params) {
                return nil, fmt.Errorf("address: %v is not valid for this "+
                        "network: %v", targetAddr.String(),
                        r.cfg.ActiveNetParams.Params.Name)
        }

        // If the destination address parses to a valid pubkey, we assume the user
        // accidentally tried to send funds to a bare pubkey address. This check is
        // here to prevent unintended transfers.
        decodedAddr, _ := hex.DecodeString(in.Addr)
        _, err = btcec.ParsePubKey(decodedAddr)
        if err == nil {
                return nil, fmt.Errorf("cannot send coins to pubkeys")
        }

        label, err := labels.ValidateAPI(in.Label)
        if err != nil {
                return nil, err
        }

        coinSelectionStrategy, err := lnrpc.UnmarshallCoinSelectionStrategy(
                in.CoinSelectionStrategy,
                r.server.cc.Wallet.Cfg.CoinSelectionStrategy,
        )
        if err != nil {
                return nil, err
        }

        var txid *chainhash.Hash

        wallet := r.server.cc.Wallet
        maxFeeRate := r.cfg.Sweeper.MaxFeeRate.FeePerKWeight()

        var selectOutpoints fn.Set[wire.OutPoint]
        if len(in.Outpoints) != 0 {
                wireOutpoints, err := toWireOutpoints(in.Outpoints)
                if err != nil {
                        return nil, fmt.Errorf("can't create outpoints "+
                                "%w", err)
                }

                if fn.HasDuplicates(wireOutpoints) {
                        return nil, fmt.Errorf("selected outpoints contain " +
                                "duplicate values")
                }

                selectOutpoints = fn.NewSet(wireOutpoints...)
        }

        // If the send all flag is active, then we'll attempt to sweep all the
        // coins in the wallet in a single transaction (if possible),
        // otherwise, we'll respect the amount, and attempt a regular 2-output
        // send.
        if in.SendAll {
                // At this point, the amount shouldn't be set since we've been
                // instructed to sweep all the coins from the wallet.
                if in.Amount != 0 {
                        return nil, fmt.Errorf("amount set while SendAll is " +
                                "active")
                }

                _, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
                if err != nil {
                        return nil, err
                }

                // With the sweeper instance created, we can now generate a
                // transaction that will sweep ALL outputs from the wallet in a
                // single transaction. This will be generated in a concurrent
                // safe manner, so no need to worry about locking. The tx will
                // pay to the change address created above if we needed to
                // reserve any value, the rest will go to targetAddr.
                sweepTxPkg, err := sweep.CraftSweepAllTx(
                        feePerKw, maxFeeRate, uint32(bestHeight), nil,
                        targetAddr, wallet, wallet, wallet.WalletController,
                        r.server.cc.Signer, minConfs, selectOutpoints,
                )
                if err != nil {
                        return nil, err
                }

                // Before we publish the transaction we make sure it won't
                // violate our reserved wallet value.
                var reservedVal btcutil.Amount
                err = wallet.WithCoinSelectLock(func() error {
                        var err error
                        reservedVal, err = wallet.CheckReservedValueTx(
                                lnwallet.CheckReservedValueTxReq{
                                        Tx: sweepTxPkg.SweepTx,
                                },
                        )
                        return err
                })

                // If sending everything to this address would invalidate our
                // reserved wallet balance, we create a new sweep tx, where
                // we'll send the reserved value back to our wallet.
                if err == lnwallet.ErrReservedValueInvalidated {
                        sweepTxPkg.CancelSweepAttempt()

                        rpcsLog.Debugf("Reserved value %v not satisfied after "+
                                "send_all, trying with change output",
                                reservedVal)

                        // We'll request a change address from the wallet,
                        // where we'll send this reserved value back to. This
                        // ensures this is an address the wallet knows about,
                        // allowing us to pass the reserved value check.
                        changeAddr, err := r.server.cc.Wallet.NewAddress(
                                lnwallet.TaprootPubkey, true,
                                lnwallet.DefaultAccountName,
                        )
                        if err != nil {
                                return nil, err
                        }

                        // Send the reserved value to this change address, the
                        // remaining funds will go to the targetAddr.
                        outputs := []sweep.DeliveryAddr{
                                {
                                        Addr: changeAddr,
                                        Amt:  reservedVal,
                                },
                        }

                        sweepTxPkg, err = sweep.CraftSweepAllTx(
                                feePerKw, maxFeeRate, uint32(bestHeight),
                                outputs, targetAddr, wallet, wallet,
                                wallet.WalletController,
                                r.server.cc.Signer, minConfs, selectOutpoints,
                        )
                        if err != nil {
                                return nil, err
                        }

                        // Sanity check the new tx by re-doing the check.
                        err = wallet.WithCoinSelectLock(func() error {
                                _, err := wallet.CheckReservedValueTx(
                                        lnwallet.CheckReservedValueTxReq{
                                                Tx: sweepTxPkg.SweepTx,
                                        },
                                )
                                return err
                        })
                        if err != nil {
                                sweepTxPkg.CancelSweepAttempt()

                                return nil, err
                        }
                } else if err != nil {
                        sweepTxPkg.CancelSweepAttempt()

                        return nil, err
                }

                rpcsLog.Debugf("Sweeping coins from wallet to addr=%v, "+
                        "with tx=%v", in.Addr, spew.Sdump(sweepTxPkg.SweepTx))

                // As our sweep transaction was created successfully, we'll
                // now attempt to publish it, cancelling the sweep pkg to
                // return all outputs if it fails.
                err = wallet.PublishTransaction(sweepTxPkg.SweepTx, label)
                if err != nil {
                        sweepTxPkg.CancelSweepAttempt()

                        return nil, fmt.Errorf("unable to broadcast sweep "+
                                "transaction: %v", err)
                }

                sweepTXID := sweepTxPkg.SweepTx.TxHash()
                txid = &sweepTXID
        } else {

                // We'll now construct our payment map, and use the wallet's
                // coin selection synchronization method to ensure that no coin
                // selection (funding, sweep alls, other sends) can proceed
                // while we instruct the wallet to send this transaction.
                paymentMap := map[string]int64{targetAddr.String(): in.Amount}
                err := wallet.WithCoinSelectLock(func() error {
                        newTXID, err := r.sendCoinsOnChain(
                                paymentMap, feePerKw, minConfs, label,
                                coinSelectionStrategy, selectOutpoints,
                        )
                        if err != nil {
                                return err
                        }

                        txid = newTXID

                        return nil
                })
                if err != nil {
                        return nil, err
                }
        }

        rpcsLog.Infof("[sendcoins] spend generated txid: %v", txid.String())

        return &lnrpc.SendCoinsResponse{Txid: txid.String()}, nil
}

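// Illustrative sketch (editor's addition, not part of the original file):
// two typical SendCoins invocations, assuming a connected
// lnrpc.LightningClient named "client" and placeholder addresses. The first
// sends a fixed amount at an explicit fee rate; the second sweeps the whole
// wallet balance (Amount must be zero when SendAll is set, as enforced above):
//
//      resp, err := client.SendCoins(ctx, &lnrpc.SendCoinsRequest{
//              Addr:        "bcrt1qdest...",
//              Amount:      500_000,
//              SatPerVbyte: 10,
//              MinConfs:    1,
//              Label:       "refill cold wallet",
//      })
//      if err != nil {
//              log.Fatalf("sendcoins failed: %v", err)
//      }
//      fmt.Println("txid:", resp.Txid)
//
//      sweepResp, err := client.SendCoins(ctx, &lnrpc.SendCoinsRequest{
//              Addr:       "bcrt1qdest...",
//              SendAll:    true,
//              TargetConf: 6,
//      })
//      if err != nil {
//              log.Fatalf("sweep failed: %v", err)
//      }
//      fmt.Println("sweep txid:", sweepResp.Txid)
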
// SendMany handles a request for a transaction that creates multiple specified
// outputs in parallel.
func (r *rpcServer) SendMany(ctx context.Context,
        in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {

        // Keep the old behavior prior to 0.18.0 - when the user doesn't set
        // fee rate or conf target, the default conf target of 6 is used.
        targetConf := maybeUseDefaultConf(
                in.SatPerByte, in.SatPerVbyte, uint32(in.TargetConf),
        )

        // Calculate an appropriate fee rate for this transaction.
        feePerKw, err := lnrpc.CalculateFeeRate(
                uint64(in.SatPerByte), in.SatPerVbyte, // nolint:staticcheck
                targetConf, r.server.cc.FeeEstimator,
        )
        if err != nil {
                return nil, err
        }

        // Then, we'll extract the minimum number of confirmations that each
        // output we use to fund the transaction should satisfy.
        minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
        if err != nil {
                return nil, err
        }

        label, err := labels.ValidateAPI(in.Label)
        if err != nil {
                return nil, err
        }

        coinSelectionStrategy, err := lnrpc.UnmarshallCoinSelectionStrategy(
                in.CoinSelectionStrategy,
                r.server.cc.Wallet.Cfg.CoinSelectionStrategy,
        )
        if err != nil {
                return nil, err
        }

        rpcsLog.Infof("[sendmany] outputs=%v, sat/kw=%v",
                spew.Sdump(in.AddrToAmount), int64(feePerKw))

        var txid *chainhash.Hash

        // We'll attempt to send to the target set of outputs, ensuring that we
        // synchronize with any other ongoing coin selection attempts which
        // happen to also be concurrently executing.
        wallet := r.server.cc.Wallet
        err = wallet.WithCoinSelectLock(func() error {
                sendManyTXID, err := r.sendCoinsOnChain(
                        in.AddrToAmount, feePerKw, minConfs, label,
                        coinSelectionStrategy, nil,
                )
                if err != nil {
                        return err
                }

                txid = sendManyTXID

                return nil
        })
        if err != nil {
                return nil, err
        }

        rpcsLog.Infof("[sendmany] spend generated txid: %v", txid.String())

        return &lnrpc.SendManyResponse{Txid: txid.String()}, nil
}

// NewAddress creates a new address under control of the local wallet.
func (r *rpcServer) NewAddress(ctx context.Context,
        in *lnrpc.NewAddressRequest) (*lnrpc.NewAddressResponse, error) {

        // Always use the default wallet account unless one was specified.
        account := lnwallet.DefaultAccountName
        if in.Account != "" {
                account = in.Account
        }

        // Translate the gRPC proto address type to the wallet controller's
        // available address types.
        var (
                addr btcutil.Address
                err  error
        )
        switch in.Type {
        case lnrpc.AddressType_WITNESS_PUBKEY_HASH:
                addr, err = r.server.cc.Wallet.NewAddress(
                        lnwallet.WitnessPubKey, false, account,
                )
                if err != nil {
                        return nil, err
                }

        case lnrpc.AddressType_NESTED_PUBKEY_HASH:
                addr, err = r.server.cc.Wallet.NewAddress(
                        lnwallet.NestedWitnessPubKey, false, account,
                )
                if err != nil {
                        return nil, err
                }

        case lnrpc.AddressType_TAPROOT_PUBKEY:
                addr, err = r.server.cc.Wallet.NewAddress(
                        lnwallet.TaprootPubkey, false, account,
                )
                if err != nil {
                        return nil, err
                }

        case lnrpc.AddressType_UNUSED_WITNESS_PUBKEY_HASH:
                addr, err = r.server.cc.Wallet.LastUnusedAddress(
                        lnwallet.WitnessPubKey, account,
                )
                if err != nil {
                        return nil, err
                }

        case lnrpc.AddressType_UNUSED_NESTED_PUBKEY_HASH:
                addr, err = r.server.cc.Wallet.LastUnusedAddress(
                        lnwallet.NestedWitnessPubKey, account,
                )
                if err != nil {
                        return nil, err
                }

        case lnrpc.AddressType_UNUSED_TAPROOT_PUBKEY:
                addr, err = r.server.cc.Wallet.LastUnusedAddress(
                        lnwallet.TaprootPubkey, account,
                )
                if err != nil {
                        return nil, err
                }

        default:
                return nil, fmt.Errorf("unknown address type: %v", in.Type)
        }

        rpcsLog.Debugf("[newaddress] account=%v type=%v addr=%v", account,
                in.Type, addr.String())
        return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
}

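// Illustrative sketch (editor's addition, not part of the original file):
// requesting a fresh taproot address for the default account, assuming a
// connected lnrpc.LightningClient named "client":
//
//      resp, err := client.NewAddress(ctx, &lnrpc.NewAddressRequest{
//              Type: lnrpc.AddressType_TAPROOT_PUBKEY,
//      })
//      if err != nil {
//              log.Fatalf("newaddress failed: %v", err)
//      }
//      fmt.Println("address:", resp.Address)
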
var (
        // signedMsgPrefix is a special prefix that we'll prepend to any
        // messages we sign/verify. We do this to ensure that we don't
        // accidentally sign a sighash, or other sensitive material. By
        // prepending this fragment, we bind message signing to our particular
        // context.
        signedMsgPrefix = []byte("Lightning Signed Message:")
)

// SignMessage signs a message with the resident node's private key. The
// returned signature string is zbase32 encoded and pubkey recoverable, meaning
// that only the message digest and signature are needed for verification.
func (r *rpcServer) SignMessage(_ context.Context,
        in *lnrpc.SignMessageRequest) (*lnrpc.SignMessageResponse, error) {

        if in.Msg == nil {
                return nil, fmt.Errorf("need a message to sign")
        }

        in.Msg = append(signedMsgPrefix, in.Msg...)
        sigBytes, err := r.server.nodeSigner.SignMessageCompact(
                in.Msg, !in.SingleHash,
        )
        if err != nil {
                return nil, err
        }

        sig := zbase32.EncodeToString(sigBytes)
        return &lnrpc.SignMessageResponse{Signature: sig}, nil
}

// VerifyMessage verifies a signature over a msg. The signature must be zbase32
// encoded and signed by an active node in the resident node's channel
// database. In addition to returning the validity of the signature,
// VerifyMessage also returns the recovered pubkey from the signature.
func (r *rpcServer) VerifyMessage(ctx context.Context,
        in *lnrpc.VerifyMessageRequest) (*lnrpc.VerifyMessageResponse, error) {

        if in.Msg == nil {
                return nil, fmt.Errorf("need a message to verify")
        }

        // The signature should be zbase32 encoded.
        sig, err := zbase32.DecodeString(in.Signature)
        if err != nil {
                return nil, fmt.Errorf("failed to decode signature: %w", err)
        }

        // The signature is over the double-sha256 hash of the message.
        in.Msg = append(signedMsgPrefix, in.Msg...)
        digest := chainhash.DoubleHashB(in.Msg)

        // RecoverCompact both recovers the pubkey and validates the signature.
        pubKey, _, err := ecdsa.RecoverCompact(sig, digest)
        if err != nil {
                return &lnrpc.VerifyMessageResponse{Valid: false}, nil
        }
        pubKeyHex := hex.EncodeToString(pubKey.SerializeCompressed())

        var pub [33]byte
        copy(pub[:], pubKey.SerializeCompressed())

        // Query the channel graph to ensure a node in the network with active
        // channels signed the message.
        //
        // TODO(phlip9): Require valid nodes to have capital in active channels.
        graph := r.server.graphDB
        _, active, err := graph.HasLightningNode(pub)
        if err != nil {
                return nil, fmt.Errorf("failed to query graph: %w", err)
        }

        return &lnrpc.VerifyMessageResponse{
                Valid:  active,
                Pubkey: pubKeyHex,
        }, nil
}

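// Illustrative sketch (editor's addition, not part of the original file):
// signing a message and verifying it over RPC. The signedMsgPrefix, the
// double-SHA256 digest and the zbase32 encoding are all handled server side
// by the two handlers above; the client only passes the opaque signature
// string around. Assumes a connected lnrpc.LightningClient named "client":
//
//      signResp, err := client.SignMessage(ctx, &lnrpc.SignMessageRequest{
//              Msg: []byte("rpcserver example"),
//      })
//      if err != nil {
//              log.Fatalf("sign failed: %v", err)
//      }
//
//      verifyResp, err := client.VerifyMessage(ctx, &lnrpc.VerifyMessageRequest{
//              Msg:       []byte("rpcserver example"),
//              Signature: signResp.Signature,
//      })
//      if err != nil {
//              log.Fatalf("verify failed: %v", err)
//      }
//
//      // Valid is only true if the recovered pubkey belongs to a node with
//      // active channels in the local graph, per VerifyMessage above.
//      fmt.Println(verifyResp.Valid, verifyResp.Pubkey)
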
// ConnectPeer attempts to establish a connection to a remote peer.
func (r *rpcServer) ConnectPeer(ctx context.Context,
        in *lnrpc.ConnectPeerRequest) (*lnrpc.ConnectPeerResponse, error) {

        // The server hasn't yet started, so it won't be able to service any of
        // our requests, so we'll bail early here.
        if !r.server.Started() {
                return nil, ErrServerNotActive
        }

        if in.Addr == nil {
                return nil, fmt.Errorf("need: lnc pubkeyhash@hostname")
        }

        pubkeyHex, err := hex.DecodeString(in.Addr.Pubkey)
        if err != nil {
                return nil, err
        }
        pubKey, err := btcec.ParsePubKey(pubkeyHex)
        if err != nil {
                return nil, err
        }

        // Connections to ourselves are disallowed for obvious reasons.
        if pubKey.IsEqual(r.server.identityECDH.PubKey()) {
                return nil, fmt.Errorf("cannot make connection to self")
        }

        addr, err := parseAddr(in.Addr.Host, r.cfg.net)
        if err != nil {
                return nil, err
        }

        peerAddr := &lnwire.NetAddress{
                IdentityKey: pubKey,
                Address:     addr,
                ChainNet:    r.cfg.ActiveNetParams.Net,
        }

        rpcsLog.Debugf("[connectpeer] requested connection to %x@%s",
                peerAddr.IdentityKey.SerializeCompressed(), peerAddr.Address)

        // By default, we will use the global connection timeout value.
        timeout := r.cfg.ConnectionTimeout

        // Check if the connection timeout is set. If set, we will use it in our
        // request.
        if in.Timeout != 0 {
                timeout = time.Duration(in.Timeout) * time.Second
                rpcsLog.Debugf("[connectpeer] connection timeout is set to %v",
                        timeout)
        }

        if err := r.server.ConnectToPeer(
                peerAddr, in.Perm, timeout,
        ); err != nil {
                rpcsLog.Errorf("[connectpeer]: error connecting to peer: %v",
                        err)
                return nil, err
        }

        rpcsLog.Debugf("Connected to peer: %v", peerAddr.String())

        return &lnrpc.ConnectPeerResponse{
                Status: fmt.Sprintf("connection to %v initiated",
                        peerAddr.String()),
        }, nil
}

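// Illustrative sketch (editor's addition, not part of the original file):
// connecting to a peer with a per-call timeout, assuming a connected
// lnrpc.LightningClient named "client" and that the Addr field is the
// lnrpc.LightningAddress message with the Pubkey/Host fields read by the
// handler above:
//
//      resp, err := client.ConnectPeer(ctx, &lnrpc.ConnectPeerRequest{
//              Addr: &lnrpc.LightningAddress{
//                      Pubkey: "02abc...",              // hex-encoded node pubkey
//                      Host:   "peer.example.com:9735", // host:port
//              },
//              Perm:    true, // make the connection persistent
//              Timeout: 30,   // seconds, overrides the global default
//      })
//      if err != nil {
//              log.Fatalf("connect failed: %v", err)
//      }
//      fmt.Println(resp.Status)
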
// DisconnectPeer attempts to disconnect one peer from another identified by a
// given pubKey. In the case that we currently have a pending or active channel
// with the target peer, this action will be disallowed.
func (r *rpcServer) DisconnectPeer(ctx context.Context,
        in *lnrpc.DisconnectPeerRequest) (*lnrpc.DisconnectPeerResponse, error) {

        rpcsLog.Debugf("[disconnectpeer] from peer(%s)", in.PubKey)

        if !r.server.Started() {
                return nil, ErrServerNotActive
        }

        // First we'll validate the string passed in within the request to
        // ensure that it's a valid hex-string, and also a valid compressed
        // public key.
        pubKeyBytes, err := hex.DecodeString(in.PubKey)
        if err != nil {
                return nil, fmt.Errorf("unable to decode pubkey bytes: %w", err)
        }
        peerPubKey, err := btcec.ParsePubKey(pubKeyBytes)
        if err != nil {
                return nil, fmt.Errorf("unable to parse pubkey: %w", err)
        }

        // Next, we'll fetch the pending/active channels we have with a
        // particular peer.
        nodeChannels, err := r.server.chanStateDB.FetchOpenChannels(peerPubKey)
        if err != nil {
                return nil, fmt.Errorf("unable to fetch channels for peer: %w",
                        err)
        }

        // In order to avoid erroneously disconnecting from a peer that we have
        // an active channel with, if we have any channels active with this
        // peer, then we'll disallow disconnecting from them.
        if len(nodeChannels) != 0 {
                // If we are not in a dev environment or the configured dev
                // value `unsafedisconnect` is false, we return an error since
                // there are active channels.
                if !r.cfg.Dev.GetUnsafeDisconnect() {
                        return nil, fmt.Errorf("cannot disconnect from "+
                                "peer(%x), still has %d active channels",
                                pubKeyBytes, len(nodeChannels))
                }

                // We are in a dev environment, print a warning log and
                // disconnect.
                rpcsLog.Warnf("UnsafeDisconnect mode, disconnecting from "+
                        "peer(%x) while there are %d active channels",
                        pubKeyBytes, len(nodeChannels))
        }

        // With all initial validation complete, we'll now request that the
        // server disconnects from the peer.
        err = r.server.DisconnectPeer(peerPubKey)
        if err != nil {
                return nil, fmt.Errorf("unable to disconnect peer: %w", err)
        }

        return &lnrpc.DisconnectPeerResponse{
                Status: "disconnect initiated",
        }, nil
}

// newFundingShimAssembler returns a new fully populated
// chanfunding.CannedAssembler using a FundingShim obtained from an RPC caller.
func newFundingShimAssembler(chanPointShim *lnrpc.ChanPointShim, initiator bool,
        keyRing keychain.KeyRing) (chanfunding.Assembler, error) {

        // Perform some basic sanity checks to ensure that all the expected
        // fields are populated.
        switch {
        case chanPointShim.RemoteKey == nil:
                return nil, fmt.Errorf("remote key not set")

        case chanPointShim.LocalKey == nil:
                return nil, fmt.Errorf("local key desc not set")

        case chanPointShim.LocalKey.RawKeyBytes == nil:
                return nil, fmt.Errorf("local raw key bytes not set")

        case chanPointShim.LocalKey.KeyLoc == nil:
                return nil, fmt.Errorf("local key loc not set")

        case chanPointShim.ChanPoint == nil:
                return nil, fmt.Errorf("chan point not set")

        case len(chanPointShim.PendingChanId) != 32:
                return nil, fmt.Errorf("pending chan ID not set")
        }

        // First, we'll map the RPC's channel point to one we can actually use.
        index := chanPointShim.ChanPoint.OutputIndex
        txid, err := lnrpc.GetChanPointFundingTxid(chanPointShim.ChanPoint)
        if err != nil {
                return nil, err
        }
        chanPoint := wire.NewOutPoint(txid, index)

        // Next we'll parse out the remote party's funding key, as well as our
        // full key descriptor.
        remoteKey, err := btcec.ParsePubKey(chanPointShim.RemoteKey)
        if err != nil {
                return nil, err
        }

        shimKeyDesc := chanPointShim.LocalKey
        localKey, err := btcec.ParsePubKey(shimKeyDesc.RawKeyBytes)
        if err != nil {
                return nil, err
        }
        localKeyDesc := keychain.KeyDescriptor{
                PubKey: localKey,
                KeyLocator: keychain.KeyLocator{
                        Family: keychain.KeyFamily(
                                shimKeyDesc.KeyLoc.KeyFamily,
                        ),
                        Index: uint32(shimKeyDesc.KeyLoc.KeyIndex),
                },
        }

        // Verify that if we re-derive this key according to the passed
        // KeyLocator, that we get the exact same key back. Otherwise, we may
        // end up in a situation where we aren't able to actually sign for this
        // newly created channel.
        derivedKey, err := keyRing.DeriveKey(localKeyDesc.KeyLocator)
        if err != nil {
                return nil, err
        }
        if !derivedKey.PubKey.IsEqual(localKey) {
                return nil, fmt.Errorf("KeyLocator does not match attached " +
                        "raw pubkey")
        }

        // With all the parts assembled, we can now make the canned assembler
        // to pass into the wallet.
        //
        // TODO(roasbeef): update to support musig2
        return chanfunding.NewCannedAssembler(
                chanPointShim.ThawHeight, *chanPoint,
                btcutil.Amount(chanPointShim.Amt), &localKeyDesc,
                remoteKey, initiator, chanPointShim.Musig2,
        ), nil
}

// newPsbtAssembler returns a new fully populated
// chanfunding.PsbtAssembler using a FundingShim obtained from an RPC caller.
func newPsbtAssembler(req *lnrpc.OpenChannelRequest,
        psbtShim *lnrpc.PsbtShim, netParams *chaincfg.Params) (
        chanfunding.Assembler, error) {

        var (
                packet *psbt.Packet
                err    error
        )

        // Perform some basic sanity checks to ensure that all the expected
        // fields are populated and none of the incompatible fields are.
        if len(psbtShim.PendingChanId) != 32 {
                return nil, fmt.Errorf("pending chan ID not set")
        }
        if req.SatPerByte != 0 || req.SatPerVbyte != 0 || req.TargetConf != 0 { // nolint:staticcheck
                return nil, fmt.Errorf("specifying fee estimation parameters " +
                        "is not supported for PSBT funding")
        }

        // The base PSBT is optional. But if it's set, it has to be a valid,
        // binary serialized PSBT.
        if len(psbtShim.BasePsbt) > 0 {
                packet, err = psbt.NewFromRawBytes(
                        bytes.NewReader(psbtShim.BasePsbt), false,
                )
                if err != nil {
                        return nil, fmt.Errorf("error parsing base PSBT: %w",
                                err)
                }
        }

        // With all the parts assembled, we can now make the canned assembler
        // to pass into the wallet.
        return chanfunding.NewPsbtAssembler(
                btcutil.Amount(req.LocalFundingAmount), packet, netParams,
                !psbtShim.NoPublish,
        ), nil
}

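// Illustrative sketch (editor's addition, not part of the original file):
// the shape of a PSBT funding shim as consumed by newPsbtAssembler above.
// PendingChanId must be exactly 32 bytes and the fee fields of the outer
// OpenChannelRequest must stay unset. The FundingShim_PsbtShim oneof wrapper
// name is the usual protoc-generated form and is assumed here:
//
//      var pendingChanID [32]byte
//      if _, err := rand.Read(pendingChanID[:]); err != nil { // crypto/rand
//              log.Fatal(err)
//      }
//
//      shim := &lnrpc.FundingShim{
//              Shim: &lnrpc.FundingShim_PsbtShim{
//                      PsbtShim: &lnrpc.PsbtShim{
//                              PendingChanId: pendingChanID[:],
//                              // BasePsbt is optional; if set it must be a
//                              // binary-serialized PSBT.
//                              NoPublish: false,
//                      },
//              },
//      }
//
//      _ = shim // attached to OpenChannelRequest.FundingShim by the caller
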
// canOpenChannel returns an error if the necessary subsystems for channel
// funding are not ready.
func (r *rpcServer) canOpenChannel() error {
        // We can't open a channel until the main server has started.
        if !r.server.Started() {
                return ErrServerNotActive
        }

        // Creation of channels before the wallet syncs up is currently
        // disallowed.
        isSynced, _, err := r.server.cc.Wallet.IsSynced()
        if err != nil {
                return err
        }
        if !isSynced {
                return errors.New("channels cannot be created before the " +
                        "wallet is fully synced")
        }

        return nil
}

// parseOpenChannelReq parses an OpenChannelRequest message into an InitFundingMsg
// struct. The logic is abstracted so that it can be shared between OpenChannel
// and OpenChannelSync.
func (r *rpcServer) parseOpenChannelReq(in *lnrpc.OpenChannelRequest,
        isSync bool) (*funding.InitFundingMsg, error) {

        rpcsLog.Debugf("[openchannel] request to NodeKey(%x) "+
                "allocation(us=%v, them=%v)", in.NodePubkey,
                in.LocalFundingAmount, in.PushSat)

        localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
        remoteInitialBalance := btcutil.Amount(in.PushSat)

        // If we are not committing the maximum viable balance towards a channel
        // then the local funding amount must be specified. In case FundMax is
        // set, the funding amount is determined within the interval between the
        // minimum funding amount and the configured maximum channel size.
        if !in.FundMax && localFundingAmt == 0 {
                return nil, fmt.Errorf("local funding amount must be non-zero")
        }

        // Ensure that the initial balance of the remote party (if pushing
        // satoshis) does not exceed the amount the local party has requested
        // for funding. This is only checked if we are not committing the
        // maximum viable amount towards the channel balance. If we do commit
        // the maximum then the remote balance is checked in a dedicated FundMax
        // check.
        if !in.FundMax && remoteInitialBalance >= localFundingAmt {
                return nil, fmt.Errorf("amount pushed to remote peer for " +
                        "initial state must be below the local funding amount")
        }

        // We either allow the fundmax or the psbt flow hence we return an error
        // if both are set.
        if in.FundingShim != nil && in.FundMax {
                return nil, fmt.Errorf("cannot provide a psbt funding shim " +
                        "while committing the maximum wallet balance towards " +
                        "the channel opening")
        }

        // If the FundMax flag is set, ensure that the acceptable minimum local
        // amount adheres to the amount to be pushed to the remote, and to
        // current rules, while also respecting the settings for the maximum
        // channel size.
        var minFundAmt, fundUpToMaxAmt btcutil.Amount
        if in.FundMax {
                // We assume the configured maximum channel size to be the upper
                // bound of our "maxed" out funding attempt.
                fundUpToMaxAmt = btcutil.Amount(r.cfg.MaxChanSize)

                // Since the standard non-fundmax flow requires the minimum
                // funding amount to be at least in the amount of the initial
                // remote balance (push amount) we need to adjust the minimum
                // funding amount accordingly. We initially assume the minimum
                // allowed channel size as minimum funding amount.
                minFundAmt = funding.MinChanFundingSize

                // If minFundAmt is less than the initial remote balance we
                // simply assign the initial remote balance to minFundAmt in
                // order to fulfill the criterion. Whether or not this so
                // determined minimum amount is actually available is
                // ascertained downstream in the lnwallet's reservation
                // workflow.
                if remoteInitialBalance >= minFundAmt {
                        minFundAmt = remoteInitialBalance
                }
        }

        minHtlcIn := lnwire.MilliSatoshi(in.MinHtlcMsat)
        remoteCsvDelay := uint16(in.RemoteCsvDelay)
        maxValue := lnwire.MilliSatoshi(in.RemoteMaxValueInFlightMsat)
        maxHtlcs := uint16(in.RemoteMaxHtlcs)
        remoteChanReserve := btcutil.Amount(in.RemoteChanReserveSat)

        globalFeatureSet := r.server.featureMgr.Get(feature.SetNodeAnn)

        // Determine if the user provided channel fees
        // and if so pass them on to the funding workflow.
        var channelBaseFee, channelFeeRate *uint64
        if in.UseBaseFee {
                channelBaseFee = &in.BaseFee
        }
        if in.UseFeeRate {
                channelFeeRate = &in.FeeRate
        }

        // Ensure that the remote channel reserve does not exceed 20% of the
        // channel capacity.
        if !in.FundMax && remoteChanReserve >= localFundingAmt/5 {
                return nil, fmt.Errorf("remote channel reserve must be less " +
                        "than 20%% of the channel capacity")
        }

        // Ensure that the user doesn't exceed the current soft-limit for
        // channel size. If the funding amount is above the soft-limit, then
        // we'll reject the request.
        // If the FundMax flag is set the local amount is determined downstream
        // in the wallet hence we do not check it here against the maximum
        // funding amount. Only if the localFundingAmt is specified we can check
        // if it exceeds the maximum funding amount.
        wumboEnabled := globalFeatureSet.HasFeature(
                lnwire.WumboChannelsOptional,
        )
        if !in.FundMax && !wumboEnabled && localFundingAmt > MaxFundingAmount {
                return nil, fmt.Errorf("funding amount is too large, the max "+
                        "channel size is: %v", MaxFundingAmount)
        }

        // Restrict the size of the channel we'll actually open. At a later
        // level, we'll ensure that the output we create, after accounting for
        // fees, does not leave a dust output. In case of the FundMax flow
        // dedicated checks ensure that the lower boundary of the channel size
        // is at least in the amount of MinChanFundingSize or potentially higher
        // if a remote balance is specified.
        if !in.FundMax && localFundingAmt < funding.MinChanFundingSize {
                return nil, fmt.Errorf("channel is too small, the minimum "+
                        "channel size is: %v SAT", int64(funding.MinChanFundingSize))
        }

        // Prevent users from submitting a max-htlc value that would exceed the
        // protocol maximum.
        if maxHtlcs > input.MaxHTLCNumber/2 {
                return nil, fmt.Errorf("remote-max-htlcs (%v) cannot be "+
                        "greater than %v", maxHtlcs, input.MaxHTLCNumber/2)
        }

        // Then, we'll extract the minimum number of confirmations that each
        // output we use to fund the channel's funding transaction should
        // satisfy.
        minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
        if err != nil {
                return nil, err
        }

        // TODO(roasbeef): also return channel ID?

        var nodePubKey *btcec.PublicKey

        // Parse the remote pubkey from the NodePubkey field of the request. If
        // it's not present, we'll fall back to the deprecated version that
        // parses the key from a hex string if this is for REST for backwards
        // compatibility.
        switch {
        // Parse the raw bytes of the node key into a pubkey object so we can
        // easily manipulate it.
        case len(in.NodePubkey) > 0:
                nodePubKey, err = btcec.ParsePubKey(in.NodePubkey)
                if err != nil {
                        return nil, err
                }

        // Decode the provided target node's public key, parsing it into a pub
        // key object. For all sync calls, byte slices are expected to be
        // encoded as hex strings.
        case isSync:
                keyBytes, err := hex.DecodeString(in.NodePubkeyString) // nolint:staticcheck
                if err != nil {
                        return nil, err
                }

                nodePubKey, err = btcec.ParsePubKey(keyBytes)
                if err != nil {
                        return nil, err
                }

        default:
                return nil, fmt.Errorf("NodePubkey is not set")
        }

        // Making a channel to ourselves wouldn't be of any use, so we
        // explicitly disallow them.
        if nodePubKey.IsEqual(r.server.identityECDH.PubKey()) {
                return nil, fmt.Errorf("cannot open channel to self")
        }

        // NOTE: We also need to do the fee rate calculation for the psbt
        // funding flow because the `batchfund` depends on it.
        targetConf := maybeUseDefaultConf(
                in.SatPerByte, in.SatPerVbyte, uint32(in.TargetConf),
        )

        // Calculate an appropriate fee rate for this transaction.
        feeRate, err := lnrpc.CalculateFeeRate(
                uint64(in.SatPerByte), in.SatPerVbyte,
                targetConf, r.server.cc.FeeEstimator,
        )
        if err != nil {
                return nil, err
        }

        rpcsLog.Debugf("[openchannel]: using fee of %v sat/kw for "+
                "funding tx", int64(feeRate))

        script, err := chancloser.ParseUpfrontShutdownAddress(
                in.CloseAddress, r.cfg.ActiveNetParams.Params,
        )
        if err != nil {
                return nil, fmt.Errorf("error parsing upfront shutdown: %w",
                        err)
        }

        var channelType *lnwire.ChannelType
        switch in.CommitmentType {
        case lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE:
                if in.ZeroConf {
                        return nil, fmt.Errorf("use anchors for zero-conf")
                }

        case lnrpc.CommitmentType_LEGACY:
                channelType = new(lnwire.ChannelType)
                *channelType = lnwire.ChannelType(*lnwire.NewRawFeatureVector())

        case lnrpc.CommitmentType_STATIC_REMOTE_KEY:
                channelType = new(lnwire.ChannelType)
                *channelType = lnwire.ChannelType(*lnwire.NewRawFeatureVector(
                        lnwire.StaticRemoteKeyRequired,
                ))

        case lnrpc.CommitmentType_ANCHORS:
                channelType = new(lnwire.ChannelType)
                fv := lnwire.NewRawFeatureVector(
                        lnwire.StaticRemoteKeyRequired,
                        lnwire.AnchorsZeroFeeHtlcTxRequired,
                )

                if in.ZeroConf {
                        fv.Set(lnwire.ZeroConfRequired)
                }

                if in.ScidAlias {
                        fv.Set(lnwire.ScidAliasRequired)
                }

                *channelType = lnwire.ChannelType(*fv)

        case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
                channelType = new(lnwire.ChannelType)
                fv := lnwire.NewRawFeatureVector(
                        lnwire.StaticRemoteKeyRequired,
                        lnwire.AnchorsZeroFeeHtlcTxRequired,
                        lnwire.ScriptEnforcedLeaseRequired,
                )

                if in.ZeroConf {
                        fv.Set(lnwire.ZeroConfRequired)
                }

                if in.ScidAlias {
                        fv.Set(lnwire.ScidAliasRequired)
                }

                *channelType = lnwire.ChannelType(*fv)

        case lnrpc.CommitmentType_SIMPLE_TAPROOT:
                // If the taproot channel type is being set, then the channel
                // MUST be private (unadvertised) for now.
                if !in.Private {
                        return nil, fmt.Errorf("taproot channels must be " +
                                "private")
                }

                channelType = new(lnwire.ChannelType)
                fv := lnwire.NewRawFeatureVector(
                        lnwire.SimpleTaprootChannelsRequiredStaging,
                )

                // TODO(roasbeef): no need for the rest as they're now
                // implicit?

                if in.ZeroConf {
                        fv.Set(lnwire.ZeroConfRequired)
                }

                if in.ScidAlias {
                        fv.Set(lnwire.ScidAliasRequired)
                }

                *channelType = lnwire.ChannelType(*fv)

        case lnrpc.CommitmentType_SIMPLE_TAPROOT_OVERLAY:
                // If the taproot overlay channel type is being set, then the
                // channel MUST be private.
                if !in.Private {
                        return nil, fmt.Errorf("taproot overlay channels " +
                                "must be private")
                }

                channelType = new(lnwire.ChannelType)
                fv := lnwire.NewRawFeatureVector(
                        lnwire.SimpleTaprootOverlayChansRequired,
                )

                if in.ZeroConf {
                        fv.Set(lnwire.ZeroConfRequired)
                }

                if in.ScidAlias {
                        fv.Set(lnwire.ScidAliasRequired)
                }

                *channelType = lnwire.ChannelType(*fv)

        default:
                return nil, fmt.Errorf("unhandled request channel type %v",
                        in.CommitmentType)
        }

        // We limit the channel memo to 500 characters. This enforces
        // a reasonable upper bound on storage consumption. This also mimics
        // the length limit for the label of a TX.
        const maxMemoLength = 500
        if len(in.Memo) > maxMemoLength {
                return nil, fmt.Errorf("provided memo (%s) is of length %d, "+
                        "exceeds %d", in.Memo, len(in.Memo), maxMemoLength)
        }

        // Check if manually selected outpoints are present to fund a channel.
        var outpoints []wire.OutPoint
        if len(in.Outpoints) > 0 {
                outpoints, err = toWireOutpoints(in.Outpoints)
                if err != nil {
                        return nil, fmt.Errorf("can't create outpoints %w", err)
                }
        }

        // Instruct the server to trigger the necessary events to attempt to
        // open a new channel. A stream is returned in place, this stream will
        // be used to consume updates of the state of the pending channel.
        return &funding.InitFundingMsg{
                TargetPubkey:    nodePubKey,
                ChainHash:       *r.cfg.ActiveNetParams.GenesisHash,
                LocalFundingAmt: localFundingAmt,
                BaseFee:         channelBaseFee,
                FeeRate:         channelFeeRate,
                PushAmt: lnwire.NewMSatFromSatoshis(
                        remoteInitialBalance,
                ),
                MinHtlcIn:         minHtlcIn,
                FundingFeePerKw:   feeRate,
                Private:           in.Private,
                RemoteCsvDelay:    remoteCsvDelay,
                RemoteChanReserve: remoteChanReserve,
                MinConfs:          minConfs,
                ShutdownScript:    script,
                MaxValueInFlight:  maxValue,
                MaxHtlcs:          maxHtlcs,
                MaxLocalCsv:       uint16(in.MaxLocalCsv),
                ChannelType:       channelType,
                FundUpToMaxAmt:    fundUpToMaxAmt,
                MinFundAmt:        minFundAmt,
                Memo:              []byte(in.Memo),
                Outpoints:         outpoints,
        }, nil
}

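// Illustrative sketch (editor's addition, not part of the original file):
// an OpenChannelRequest that satisfies the checks in parseOpenChannelReq
// above (raw pubkey bytes set, push amount below the funding amount, remote
// reserve under 20% of capacity, memo under 500 characters). Placeholder
// pubkey; assumes a connected lnrpc.LightningClient named "client":
//
//      nodeKey, err := hex.DecodeString("02abc...") // 33-byte compressed key
//      if err != nil {
//              log.Fatal(err)
//      }
//
//      stream, err := client.OpenChannel(ctx, &lnrpc.OpenChannelRequest{
//              NodePubkey:           nodeKey,
//              LocalFundingAmount:   1_000_000,
//              PushSat:              50_000,
//              RemoteChanReserveSat: 10_000,
//              SatPerVbyte:          5,
//              Memo:                 "example channel",
//      })
//      if err != nil {
//              log.Fatalf("openchannel failed: %v", err)
//      }
//      for {
//              update, err := stream.Recv()
//              if err != nil {
//                      log.Fatalf("update stream: %v", err)
//              }
//              fmt.Printf("funding update: %v\n", update)
//      }
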
// toWireOutpoints converts a list of outpoints from the rpc format to the wire
// format.
func toWireOutpoints(outpoints []*lnrpc.OutPoint) ([]wire.OutPoint, error) {
        var wireOutpoints []wire.OutPoint
        for _, outpoint := range outpoints {
                hash, err := chainhash.NewHashFromStr(outpoint.TxidStr)
                if err != nil {
                        return nil, fmt.Errorf("cannot create chainhash")
                }

                wireOutpoint := wire.NewOutPoint(
                        hash, outpoint.OutputIndex,
                )
                wireOutpoints = append(wireOutpoints, *wireOutpoint)
        }

        return wireOutpoints, nil
}

// OpenChannel attempts to open a singly funded channel specified in the
// request to a remote peer.
func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
        updateStream lnrpc.Lightning_OpenChannelServer) error {

        if err := r.canOpenChannel(); err != nil {
                return err
        }

        req, err := r.parseOpenChannelReq(in, false)
        if err != nil {
                return err
        }

        // If the user has provided a shim, then we'll now augment the base
        // open channel request with this additional logic.
        if in.FundingShim != nil {
                switch {
                // If we have a chan point shim, then this means the funding
                // transaction was crafted externally. In this case we only
                // need to hand a channel point down into the wallet.
                case in.FundingShim.GetChanPointShim() != nil:
                        chanPointShim := in.FundingShim.GetChanPointShim()

                        // Map the channel point shim into a new
                        // chanfunding.CannedAssembler that the wallet will use
                        // to obtain the channel point details.
                        copy(req.PendingChanID[:], chanPointShim.PendingChanId)
                        req.ChanFunder, err = newFundingShimAssembler(
                                chanPointShim, true, r.server.cc.KeyRing,
                        )
                        if err != nil {
                                return err
                        }

                // If we have a PSBT shim, then this means the funding
                // transaction will be crafted outside of the wallet, once the
                // funding multisig output script is known. We'll create an
                // intent that will supervise the multi-step process.
                case in.FundingShim.GetPsbtShim() != nil:
                        psbtShim := in.FundingShim.GetPsbtShim()

                        // Instruct the wallet to use the new
                        // chanfunding.PsbtAssembler to construct the funding
                        // transaction.
                        copy(req.PendingChanID[:], psbtShim.PendingChanId)

                        // NOTE: For the PSBT case we do also allow unconfirmed
                        // utxos to fund the psbt transaction because we make
                        // sure we only use stable utxos.
                        req.ChanFunder, err = newPsbtAssembler(
                                in, psbtShim,
                                &r.server.cc.Wallet.Cfg.NetParams,
                        )
                        if err != nil {
                                return err
                        }
                }
        }

        updateChan, errChan := r.server.OpenChannel(req)

        var outpoint wire.OutPoint
out:
        for {
                select {
                case err := <-errChan:
                        rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
                                req.TargetPubkey.SerializeCompressed(), err)
                        return err
                case fundingUpdate := <-updateChan:
                        rpcsLog.Tracef("[openchannel] sending update: %v",
                                fundingUpdate)
                        if err := updateStream.Send(fundingUpdate); err != nil {
                                return err
                        }

                        // If a final channel open update is being sent, then
                        // we can break out of our recv loop as we no longer
                        // need to process any further updates.
                        update, ok := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
                        if ok {
                                chanPoint := update.ChanOpen.ChannelPoint
                                txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
                                if err != nil {
                                        return err
                                }
                                outpoint = wire.OutPoint{
                                        Hash:  *txid,
                                        Index: chanPoint.OutputIndex,
                                }

                                break out
                        }
                case <-r.quit:
3✔
2516
                        return nil
3✔
2517
                }
2518
        }
2519

2520
        rpcsLog.Tracef("[openchannel] success NodeKey(%x), ChannelPoint(%v)",
3✔
2521
                req.TargetPubkey.SerializeCompressed(), outpoint)
3✔
2522
        return nil
3✔
2523
}
2524
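
// Illustrative sketch (not part of the original rpcserver.go): how a gRPC
// client might drain the OpenChannel update stream served above, stopping
// once the final ChanOpen update arrives. The client and request are assumed
// to be constructed elsewhere.
func exampleConsumeOpenChannelStream(ctx context.Context,
        client lnrpc.LightningClient, req *lnrpc.OpenChannelRequest) error {

        stream, err := client.OpenChannel(ctx, req)
        if err != nil {
                return err
        }

        for {
                update, err := stream.Recv()
                if err != nil {
                        return err
                }

                // The first update arrives once the funding transaction is
                // broadcast; the final ChanOpen update signals that the
                // channel is fully open and the stream is done.
                if _, ok := update.Update.(*lnrpc.OpenStatusUpdate_ChanOpen); ok {
                        return nil
                }
        }
}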

2525
// OpenChannelSync is a synchronous version of the OpenChannel RPC call. This
2526
// call is meant to be consumed by clients to the REST proxy. As with all other
2527
// sync calls, all byte slices are instead to be populated as hex encoded
2528
// strings.
2529
func (r *rpcServer) OpenChannelSync(ctx context.Context,
2530
        in *lnrpc.OpenChannelRequest) (*lnrpc.ChannelPoint, error) {
×
2531

×
2532
        if err := r.canOpenChannel(); err != nil {
×
2533
                return nil, err
×
2534
        }
×
2535

2536
        req, err := r.parseOpenChannelReq(in, true)
×
2537
        if err != nil {
×
2538
                return nil, err
×
2539
        }
×
2540

2541
        updateChan, errChan := r.server.OpenChannel(req)
×
2542
        select {
×
2543
        // If an error occurs then immediately return the error to the client.
2544
        case err := <-errChan:
×
2545
                rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
×
2546
                        req.TargetPubkey.SerializeCompressed(), err)
×
2547
                return nil, err
×
2548

2549
        // Otherwise, wait for the first channel update. The first update sent
2550
        // is when the funding transaction is broadcast to the network.
2551
        case fundingUpdate := <-updateChan:
×
2552
                rpcsLog.Tracef("[openchannel] sending update: %v",
×
2553
                        fundingUpdate)
×
2554

×
2555
                // Parse out the txid of the pending funding transaction. The
×
2556
                // sync client can use this to poll against the list of
×
2557
                // PendingChannels.
×
2558
                openUpdate := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
×
2559
                chanUpdate := openUpdate.ChanPending
×
2560

×
2561
                return &lnrpc.ChannelPoint{
×
2562
                        FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
×
2563
                                FundingTxidBytes: chanUpdate.Txid,
×
2564
                        },
×
2565
                        OutputIndex: chanUpdate.OutputIndex,
×
2566
                }, nil
×
2567
        case <-r.quit:
×
2568
                return nil, nil
×
2569
        }
2570
}
2571
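
// Illustrative sketch (not part of the original rpcserver.go): turning the
// ChannelPoint returned by OpenChannelSync into a wire.OutPoint that can be
// matched against the PendingChannels listing while the funding transaction
// confirms.
func exampleChanPointToOutPoint(cp *lnrpc.ChannelPoint) (*wire.OutPoint, error) {
        txid, err := lnrpc.GetChanPointFundingTxid(cp)
        if err != nil {
                return nil, err
        }

        return wire.NewOutPoint(txid, cp.OutputIndex), nil
}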

2572
// BatchOpenChannel attempts to open multiple single-funded channels in a
2573
// single transaction in an atomic way. This means either all channel open
2574
// requests succeed at once or all attempts are aborted if any of them fail.
2575
// This is the safer variant of using PSBTs to manually fund a batch of
2576
// channels through the OpenChannel RPC.
2577
func (r *rpcServer) BatchOpenChannel(ctx context.Context,
2578
        in *lnrpc.BatchOpenChannelRequest) (*lnrpc.BatchOpenChannelResponse,
2579
        error) {
3✔
2580

3✔
2581
        if err := r.canOpenChannel(); err != nil {
3✔
2582
                return nil, err
×
2583
        }
×
2584

2585
        // We need the wallet kit server to do the heavy lifting on the PSBT
2586
        // part. If we didn't rely on re-using the wallet kit server's logic we
2587
        // would need to re-implement everything here. Since we deliver lnd with
2588
        // the wallet kit server enabled by default we can assume it's okay to
2589
        // make this functionality dependent on that server being active.
2590
        var walletKitServer walletrpc.WalletKitServer
3✔
2591
        for _, subServer := range r.subServers {
6✔
2592
                if subServer.Name() == walletrpc.SubServerName {
6✔
2593
                        walletKitServer = subServer.(walletrpc.WalletKitServer)
3✔
2594
                }
3✔
2595
        }
2596
        if walletKitServer == nil {
3✔
2597
                return nil, fmt.Errorf("batch channel open is only possible " +
×
2598
                        "if walletrpc subserver is active")
×
2599
        }
×
2600

2601
        rpcsLog.Debugf("[batchopenchannel] request to open batch of %d "+
3✔
2602
                "channels", len(in.Channels))
3✔
2603

3✔
2604
        // Make sure there is at least one channel to open. We could say we want
3✔
2605
        // at least two channels for a batch. But maybe it's nice if developers
3✔
2606
        // can use the same API for a single channel as well as a batch of
3✔
2607
        // channels.
3✔
2608
        if len(in.Channels) == 0 {
3✔
2609
                return nil, fmt.Errorf("specify at least one channel")
×
2610
        }
×
2611

2612
        // In case we remove a pending channel from the database, we need to set
2613
        // a close height, so we'll just use the current best known height.
2614
        _, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
3✔
2615
        if err != nil {
3✔
2616
                return nil, fmt.Errorf("error fetching best block: %w", err)
×
2617
        }
×
2618

2619
        // So far everything looks good and we can now start the heavy lifting
2620
        // that's done in the funding package.
2621
        requestParser := func(req *lnrpc.OpenChannelRequest) (
3✔
2622
                *funding.InitFundingMsg, error) {
6✔
2623

3✔
2624
                return r.parseOpenChannelReq(req, false)
3✔
2625
        }
3✔
2626
        channelAbandoner := func(point *wire.OutPoint) error {
3✔
2627
                return r.abandonChan(point, uint32(bestHeight))
×
2628
        }
×
2629
        batcher := funding.NewBatcher(&funding.BatchConfig{
3✔
2630
                RequestParser:    requestParser,
3✔
2631
                ChannelAbandoner: channelAbandoner,
3✔
2632
                ChannelOpener:    r.server.OpenChannel,
3✔
2633
                WalletKitServer:  walletKitServer,
3✔
2634
                Wallet:           r.server.cc.Wallet,
3✔
2635
                NetParams:        &r.server.cc.Wallet.Cfg.NetParams,
3✔
2636
                Quit:             r.quit,
3✔
2637
        })
3✔
2638
        rpcPoints, err := batcher.BatchFund(ctx, in)
3✔
2639
        if err != nil {
6✔
2640
                return nil, fmt.Errorf("batch funding failed: %w", err)
3✔
2641
        }
3✔
2642

2643
        // Now all that's left to do is send back the response with the channel
2644
        // points we created.
2645
        return &lnrpc.BatchOpenChannelResponse{
3✔
2646
                PendingChannels: rpcPoints,
3✔
2647
        }, nil
3✔
2648
}
2649
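
// Illustrative sketch (not part of the original rpcserver.go): a minimal
// batch request that funds two channels in a single transaction, as handled
// by BatchOpenChannel above. Pubkeys and amounts are hypothetical; either
// both channels end up pending or the whole batch is aborted.
func exampleBatchOpenRequest(alice, bob []byte) *lnrpc.BatchOpenChannelRequest {
        return &lnrpc.BatchOpenChannelRequest{
                Channels: []*lnrpc.BatchOpenChannel{
                        {
                                NodePubkey:         alice,
                                LocalFundingAmount: 500_000,
                        },
                        {
                                NodePubkey:         bob,
                                LocalFundingAmount: 750_000,
                                PushSat:            25_000,
                        },
                },
        }
}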

2650
// CloseChannel attempts to close an active channel identified by its channel
2651
// point. The actions of this method can additionally be augmented to attempt
2652
// a force close after a timeout period in the case of an inactive peer.
2653
func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
2654
        updateStream lnrpc.Lightning_CloseChannelServer) error {
3✔
2655

3✔
2656
        if !r.server.Started() {
3✔
2657
                return ErrServerNotActive
×
2658
        }
×
2659

2660
        // If the user didn't specify a channel point, then we'll reject this
2661
        // request altogether.
2662
        if in.GetChannelPoint() == nil {
3✔
2663
                return fmt.Errorf("must specify channel point in close channel")
×
2664
        }
×
2665

2666
        // If force closing a channel, the fee set in the commitment transaction
2667
        // is used.
2668
        if in.Force && (in.SatPerByte != 0 || in.SatPerVbyte != 0 || // nolint:staticcheck
3✔
2669
                in.TargetConf != 0) {
3✔
2670

×
2671
                return fmt.Errorf("force closing a channel uses a pre-defined fee")
×
2672
        }
×
2673

2674
        force := in.Force
3✔
2675
        index := in.ChannelPoint.OutputIndex
3✔
2676
        txid, err := lnrpc.GetChanPointFundingTxid(in.GetChannelPoint())
3✔
2677
        if err != nil {
3✔
2678
                rpcsLog.Errorf("[closechannel] unable to get funding txid: %v", err)
×
2679
                return err
×
2680
        }
×
2681
        chanPoint := wire.NewOutPoint(txid, index)
3✔
2682

3✔
2683
        rpcsLog.Tracef("[closechannel] request for ChannelPoint(%v), force=%v",
3✔
2684
                chanPoint, force)
3✔
2685

3✔
2686
        var (
3✔
2687
                updateChan chan interface{}
3✔
2688
                errChan    chan error
3✔
2689
        )
3✔
2690

3✔
2691
        // TODO(roasbeef): if force and peer online then don't force?
3✔
2692

3✔
2693
        // First, we'll fetch the channel as is, as we'll need to examine it
3✔
2694
        // regardless of if this is a force close or not.
3✔
2695
        channel, err := r.server.chanStateDB.FetchChannel(*chanPoint)
3✔
2696
        if err != nil {
3✔
2697
                return err
×
2698
        }
×
2699

2700
        // We can't co-op or force close restored channels or channels that have
2701
        // experienced local data loss. Normally we would detect this in the
2702
        // channel arbitrator if the channel has the status
2703
        // ChanStatusLocalDataLoss after connecting to its peer. But if no
2704
        // connection can be established, the channel arbitrator doesn't know it
2705
        // can't be force closed yet.
2706
        if channel.HasChanStatus(channeldb.ChanStatusRestored) ||
3✔
2707
                channel.HasChanStatus(channeldb.ChanStatusLocalDataLoss) {
6✔
2708

3✔
2709
                return fmt.Errorf("cannot close channel with state: %v",
3✔
2710
                        channel.ChanStatus())
3✔
2711
        }
3✔
2712

2713
        // Retrieve the best height of the chain, which we'll use to complete
2714
        // either closing flow.
2715
        _, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
3✔
2716
        if err != nil {
3✔
2717
                return err
×
2718
        }
×
2719

2720
        // If a force closure was requested, then we'll handle all the details
2721
        // around the creation and broadcast of the unilateral closure
2722
        // transaction here rather than going to the switch as we don't require
2723
        // interaction from the peer.
2724
        if force {
6✔
2725
                // As we're force closing this channel, as a precaution, we'll
3✔
2726
                // ensure that the switch doesn't continue to see this channel
3✔
2727
                // as eligible for forwarding HTLC's. If the peer is online,
3✔
2728
                // then we'll also purge all of its indexes.
3✔
2729
                remotePub := channel.IdentityPub
3✔
2730
                if peer, err := r.server.FindPeer(remotePub); err == nil {
6✔
2731
                        // TODO(roasbeef): actually get the active channel
3✔
2732
                        // instead too?
3✔
2733
                        //  * so only need to grab from database
3✔
2734
                        peer.WipeChannel(&channel.FundingOutpoint)
3✔
2735
                } else {
6✔
2736
                        chanID := lnwire.NewChanIDFromOutPoint(
3✔
2737
                                channel.FundingOutpoint,
3✔
2738
                        )
3✔
2739
                        r.server.htlcSwitch.RemoveLink(chanID)
3✔
2740
                }
3✔
2741

2742
                // With the necessary indexes cleaned up, we'll now force close
2743
                // the channel.
2744
                chainArbitrator := r.server.chainArb
3✔
2745
                closingTx, err := chainArbitrator.ForceCloseContract(
3✔
2746
                        *chanPoint,
3✔
2747
                )
3✔
2748
                if err != nil {
3✔
2749
                        rpcsLog.Errorf("unable to force close transaction: %v", err)
×
2750
                        return err
×
2751
                }
×
2752

2753
                // Safety check which should never happen.
2754
                //
2755
                // TODO(ziggie): remove pointer as return value from
2756
                // ForceCloseContract.
2757
                if closingTx == nil {
3✔
2758
                        return fmt.Errorf("force close transaction is nil")
×
2759
                }
×
2760

2761
                closingTxid := closingTx.TxHash()
3✔
2762

3✔
2763
                // With the transaction broadcast, we send our first update to
3✔
2764
                // the client.
3✔
2765
                updateChan = make(chan interface{}, 2)
3✔
2766
                updateChan <- &peer.PendingUpdate{
3✔
2767
                        Txid: closingTxid[:],
3✔
2768
                }
3✔
2769

3✔
2770
                errChan = make(chan error, 1)
3✔
2771
                notifier := r.server.cc.ChainNotifier
3✔
2772
                go peer.WaitForChanToClose(
3✔
2773
                        uint32(bestHeight), notifier, errChan, chanPoint,
3✔
2774
                        &closingTxid, closingTx.TxOut[0].PkScript, func() {
6✔
2775
                                // Respond to the local subsystem which
3✔
2776
                                // requested the channel closure.
3✔
2777
                                updateChan <- &peer.ChannelCloseUpdate{
3✔
2778
                                        ClosingTxid: closingTxid[:],
3✔
2779
                                        Success:     true,
3✔
2780
                                        // Force closure transactions don't have
3✔
2781
                                        // additional local/remote outputs.
3✔
2782
                                }
3✔
2783
                        },
3✔
2784
                )
2785
        } else {
3✔
2786
                // If this is a frozen channel, then we only allow the co-op
3✔
2787
                // close to proceed if we were the responder to this channel,
3✔
2788
                // or if the absolute thaw height has been met.
3✔
2789
                if channel.IsInitiator {
6✔
2790
                        absoluteThawHeight, err := channel.AbsoluteThawHeight()
3✔
2791
                        if err != nil {
3✔
2792
                                return err
×
2793
                        }
×
2794
                        if uint32(bestHeight) < absoluteThawHeight {
6✔
2795
                                return fmt.Errorf("cannot co-op close frozen "+
3✔
2796
                                        "channel as initiator until height=%v, "+
3✔
2797
                                        "(current_height=%v)",
3✔
2798
                                        absoluteThawHeight, bestHeight)
3✔
2799
                        }
3✔
2800
                }
2801

2802
                // If the link is not known by the switch, we cannot gracefully close
2803
                // the channel.
2804
                channelID := lnwire.NewChanIDFromOutPoint(*chanPoint)
3✔
2805
                if _, err := r.server.htlcSwitch.GetLink(channelID); err != nil {
3✔
2806
                        rpcsLog.Debugf("Trying to non-force close offline channel with "+
×
2807
                                "chan_point=%v", chanPoint)
×
2808
                        return fmt.Errorf("unable to gracefully close channel while peer "+
×
2809
                                "is offline (try force closing it instead): %v", err)
×
2810
                }
×
2811

2812
                // Keep the old behavior prior to 0.18.0 - when the user
2813
                // doesn't set fee rate or conf target, the default conf target
2814
                // of 6 is used.
2815
                targetConf := maybeUseDefaultConf(
3✔
2816
                        in.SatPerByte, in.SatPerVbyte, uint32(in.TargetConf),
3✔
2817
                )
3✔
2818

3✔
2819
                // Based on the passed fee related parameters, we'll determine
3✔
2820
                // an appropriate fee rate for the cooperative closure
3✔
2821
                // transaction.
3✔
2822
                feeRate, err := lnrpc.CalculateFeeRate(
3✔
2823
                        uint64(in.SatPerByte), in.SatPerVbyte, // nolint:staticcheck
3✔
2824
                        targetConf, r.server.cc.FeeEstimator,
3✔
2825
                )
3✔
2826
                if err != nil {
3✔
2827
                        return err
×
2828
                }
×
2829

2830
                rpcsLog.Debugf("Target sat/kw for closing transaction: %v",
3✔
2831
                        int64(feeRate))
3✔
2832

3✔
2833
                // If the user hasn't specified NoWait, then before we attempt
3✔
2834
                // to close the channel we ensure there are no active HTLCs on
3✔
2835
                // the link.
3✔
2836
                if !in.NoWait && len(channel.ActiveHtlcs()) != 0 {
3✔
2837
                        return fmt.Errorf("cannot co-op close channel " +
×
2838
                                "with active htlcs")
×
2839
                }
×
2840

2841
                // Otherwise, the caller has requested a regular interactive
2842
                // cooperative channel closure. So we'll forward the request to
2843
                // the htlc switch which will handle the negotiation and
2844
                // broadcast details.
2845

2846
                var deliveryScript lnwire.DeliveryAddress
3✔
2847

3✔
2848
                // If a delivery address to close out to was specified, decode it.
3✔
2849
                if len(in.DeliveryAddress) > 0 {
6✔
2850
                        // Decode the address provided.
3✔
2851
                        addr, err := btcutil.DecodeAddress(
3✔
2852
                                in.DeliveryAddress, r.cfg.ActiveNetParams.Params,
3✔
2853
                        )
3✔
2854
                        if err != nil {
3✔
2855
                                return fmt.Errorf("invalid delivery address: "+
×
2856
                                        "%v", err)
×
2857
                        }
×
2858

2859
                        if !addr.IsForNet(r.cfg.ActiveNetParams.Params) {
3✔
2860
                                return fmt.Errorf("delivery address is not "+
×
2861
                                        "for %s",
×
2862
                                        r.cfg.ActiveNetParams.Params.Name)
×
2863
                        }
×
2864

2865
                        // Create a script to pay out to the address provided.
2866
                        deliveryScript, err = txscript.PayToAddrScript(addr)
3✔
2867
                        if err != nil {
3✔
2868
                                return err
×
2869
                        }
×
2870
                }
2871

2872
                maxFee := chainfee.SatPerKVByte(
3✔
2873
                        in.MaxFeePerVbyte * 1000,
3✔
2874
                ).FeePerKWeight()
3✔
2875
                updateChan, errChan = r.server.htlcSwitch.CloseLink(
3✔
2876
                        chanPoint, contractcourt.CloseRegular, feeRate,
3✔
2877
                        maxFee, deliveryScript,
3✔
2878
                )
3✔
2879
        }
2880

2881
        // If the user doesn't want to wait for the txid to come back then we
2882
        // will send an empty update to kick off the stream.
2883
        if in.NoWait {
6✔
2884
                rpcsLog.Trace("[closechannel] sending instant update")
3✔
2885
                if err := updateStream.Send(
3✔
2886
                        &lnrpc.CloseStatusUpdate{
3✔
2887
                                Update: &lnrpc.CloseStatusUpdate_CloseInstant{},
3✔
2888
                        },
3✔
2889
                ); err != nil {
3✔
2890
                        return err
×
2891
                }
×
2892
        }
2893
out:
3✔
2894
        for {
6✔
2895
                select {
3✔
2896
                case err := <-errChan:
×
2897
                        rpcsLog.Errorf("[closechannel] unable to close "+
×
2898
                                "ChannelPoint(%v): %v", chanPoint, err)
×
2899
                        return err
×
2900
                case closingUpdate := <-updateChan:
3✔
2901
                        rpcClosingUpdate, err := createRPCCloseUpdate(
3✔
2902
                                closingUpdate,
3✔
2903
                        )
3✔
2904
                        if err != nil {
3✔
2905
                                return err
×
2906
                        }
×
2907

2908
                        err = fn.MapOptionZ(
3✔
2909
                                r.server.implCfg.AuxDataParser,
3✔
2910
                                func(parser AuxDataParser) error {
3✔
2911
                                        return parser.InlineParseCustomData(
×
2912
                                                rpcClosingUpdate,
×
2913
                                        )
×
2914
                                },
×
2915
                        )
2916
                        if err != nil {
3✔
2917
                                return fmt.Errorf("error parsing custom data: "+
×
2918
                                        "%w", err)
×
2919
                        }
×
2920

2921
                        rpcsLog.Tracef("[closechannel] sending update: %v",
3✔
2922
                                rpcClosingUpdate)
3✔
2923

3✔
2924
                        if err := updateStream.Send(rpcClosingUpdate); err != nil {
3✔
2925
                                return err
×
2926
                        }
×
2927

2928
                        // If a final channel closing update is being sent,
2929
                        // then we can break out of our dispatch loop as we no
2930
                        // longer need to process any further updates.
2931
                        switch closeUpdate := closingUpdate.(type) {
3✔
2932
                        case *peer.ChannelCloseUpdate:
3✔
2933
                                h, _ := chainhash.NewHash(closeUpdate.ClosingTxid)
3✔
2934
                                rpcsLog.Infof("[closechannel] close completed: "+
3✔
2935
                                        "txid(%v)", h)
3✔
2936
                                break out
3✔
2937
                        }
2938
                case <-r.quit:
3✔
2939
                        return nil
3✔
2940
                }
2941
        }
2942

2943
        return nil
3✔
2944
}
2945
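
// Illustrative sketch (not part of the original rpcserver.go): the max-fee
// conversion used for co-op closes above, shown in isolation. A cap of
// 5 sat/vbyte scales to 5000 sat/kvbyte and then to 1250 sat/kw after
// dividing by the witness scale factor of 4.
func exampleMaxFeeToKWeight(maxFeePerVbyte uint64) chainfee.SatPerKWeight {
        return chainfee.SatPerKVByte(maxFeePerVbyte * 1000).FeePerKWeight()
}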

2946
func createRPCCloseUpdate(
2947
        update interface{}) (*lnrpc.CloseStatusUpdate, error) {
3✔
2948

3✔
2949
        switch u := update.(type) {
3✔
2950
        case *peer.ChannelCloseUpdate:
3✔
2951
                ccu := &lnrpc.ChannelCloseUpdate{
3✔
2952
                        ClosingTxid: u.ClosingTxid,
3✔
2953
                        Success:     u.Success,
3✔
2954
                }
3✔
2955

3✔
2956
                err := fn.MapOptionZ(
3✔
2957
                        u.LocalCloseOutput,
3✔
2958
                        func(closeOut chancloser.CloseOutput) error {
6✔
2959
                                cr, err := closeOut.ShutdownRecords.Serialize()
3✔
2960
                                if err != nil {
3✔
2961
                                        return fmt.Errorf("error serializing "+
×
2962
                                                "local close out custom "+
×
2963
                                                "records: %w", err)
×
2964
                                }
×
2965

2966
                                rpcCloseOut := &lnrpc.CloseOutput{
3✔
2967
                                        AmountSat:         int64(closeOut.Amt),
3✔
2968
                                        PkScript:          closeOut.PkScript,
3✔
2969
                                        IsLocal:           true,
3✔
2970
                                        CustomChannelData: cr,
3✔
2971
                                }
3✔
2972
                                ccu.LocalCloseOutput = rpcCloseOut
3✔
2973

3✔
2974
                                return nil
3✔
2975
                        },
2976
                )
2977
                if err != nil {
3✔
2978
                        return nil, err
×
2979
                }
×
2980

2981
                err = fn.MapOptionZ(
3✔
2982
                        u.RemoteCloseOutput,
3✔
2983
                        func(closeOut chancloser.CloseOutput) error {
6✔
2984
                                cr, err := closeOut.ShutdownRecords.Serialize()
3✔
2985
                                if err != nil {
3✔
2986
                                        return fmt.Errorf("error serializing "+
×
2987
                                                "remote close out custom "+
×
2988
                                                "records: %w", err)
×
2989
                                }
×
2990

2991
                                rpcCloseOut := &lnrpc.CloseOutput{
3✔
2992
                                        AmountSat:         int64(closeOut.Amt),
3✔
2993
                                        PkScript:          closeOut.PkScript,
3✔
2994
                                        CustomChannelData: cr,
3✔
2995
                                }
3✔
2996
                                ccu.RemoteCloseOutput = rpcCloseOut
3✔
2997

3✔
2998
                                return nil
3✔
2999
                        },
3000
                )
3001
                if err != nil {
3✔
3002
                        return nil, err
×
3003
                }
×
3004

3005
                u.AuxOutputs.WhenSome(func(outs chancloser.AuxCloseOutputs) {
3✔
3006
                        for _, out := range outs.ExtraCloseOutputs {
×
3007
                                ccu.AdditionalOutputs = append(
×
3008
                                        ccu.AdditionalOutputs,
×
3009
                                        &lnrpc.CloseOutput{
×
3010
                                                AmountSat: out.Value,
×
3011
                                                PkScript:  out.PkScript,
×
3012
                                                IsLocal:   out.IsLocal,
×
3013
                                        },
×
3014
                                )
×
3015
                        }
×
3016
                })
3017

3018
                return &lnrpc.CloseStatusUpdate{
3✔
3019
                        Update: &lnrpc.CloseStatusUpdate_ChanClose{
3✔
3020
                                ChanClose: ccu,
3✔
3021
                        },
3✔
3022
                }, nil
3✔
3023

3024
        case *peer.PendingUpdate:
3✔
3025
                return &lnrpc.CloseStatusUpdate{
3✔
3026
                        Update: &lnrpc.CloseStatusUpdate_ClosePending{
3✔
3027
                                ClosePending: &lnrpc.PendingUpdate{
3✔
3028
                                        Txid:        u.Txid,
3✔
3029
                                        OutputIndex: u.OutputIndex,
3✔
3030
                                },
3✔
3031
                        },
3✔
3032
                }, nil
3✔
3033
        }
3034

3035
        return nil, errors.New("unknown close status update")
×
3036
}
3037
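
// Illustrative sketch (not part of the original rpcserver.go): feeding a
// pending close notification through createRPCCloseUpdate. The all-zero
// txid bytes are a placeholder.
func examplePendingCloseUpdate() (*lnrpc.CloseStatusUpdate, error) {
        pending := &peer.PendingUpdate{
                Txid:        make([]byte, 32),
                OutputIndex: 0,
        }

        return createRPCCloseUpdate(pending)
}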

3038
// abandonChanFromGraph attempts to remove a channel from the channel graph. If
3039
// we can't find the chanID in the graph, then we assume it has already been
3040
// removed, and will return a nop.
3041
func abandonChanFromGraph(chanGraph *graphdb.ChannelGraph,
3042
        chanPoint *wire.OutPoint) error {
3✔
3043

3✔
3044
        // First, we'll obtain the channel ID. If we can't locate this, then
3✔
3045
        // it's the case that the channel may have already been removed from
3✔
3046
        // the graph, so we'll return a nil error.
3✔
3047
        chanID, err := chanGraph.ChannelID(chanPoint)
3✔
3048
        switch {
3✔
3049
        case errors.Is(err, graphdb.ErrEdgeNotFound):
3✔
3050
                return nil
3✔
3051
        case err != nil:
×
3052
                return err
×
3053
        }
3054

3055
        // If the channel ID is still in the graph, then that means the channel
3056
        // is still open, so we'll now move to purge it from the graph.
3057
        return chanGraph.DeleteChannelEdges(false, true, chanID)
3✔
3058
}
3059

3060
// abandonChan removes a channel from the database, graph and contract court.
3061
func (r *rpcServer) abandonChan(chanPoint *wire.OutPoint,
3062
        bestHeight uint32) error {
3✔
3063

3✔
3064
        // Before we remove the channel we cancel the rebroadcasting of the
3✔
3065
        // transaction. If this transaction does not exist in the rebroadcast
3✔
3066
        // queue anymore it is a noop.
3✔
3067
        txid, err := chainhash.NewHash(chanPoint.Hash[:])
3✔
3068
        if err != nil {
3✔
3069
                return err
×
3070
        }
×
3071
        r.server.cc.Wallet.CancelRebroadcast(*txid)
3✔
3072

3✔
3073
        // Abandoning a channel is a three-step process: remove from the open
3✔
3074
        // channel state, remove from the graph, remove from the contract
3✔
3075
        // court. Between any step it's possible that the user restarts the
3✔
3076
        // process all over again. As a result, each of the steps below is
3✔
3077
        // intended to be idempotent.
3✔
3078
        err = r.server.chanStateDB.AbandonChannel(chanPoint, bestHeight)
3✔
3079
        if err != nil {
3✔
3080
                return err
×
3081
        }
×
3082
        err = abandonChanFromGraph(r.server.graphDB, chanPoint)
3✔
3083
        if err != nil {
3✔
3084
                return err
×
3085
        }
×
3086
        err = r.server.chainArb.ResolveContract(*chanPoint)
3✔
3087
        if err != nil {
3✔
3088
                return err
×
3089
        }
×
3090

3091
        // If this channel was in the process of being closed, but didn't fully
3092
        // close, then it's possible that the nursery is hanging on to some
3093
        // state. To err on the side of caution, we'll now attempt to wipe any
3094
        // state for this channel from the nursery.
3095
        err = r.server.utxoNursery.RemoveChannel(chanPoint)
3✔
3096
        if err != nil && err != contractcourt.ErrContractNotFound {
3✔
3097
                return err
×
3098
        }
×
3099

3100
        // Finally, notify the backup listeners that the channel can be removed
3101
        // from any channel backups.
3102
        r.server.channelNotifier.NotifyClosedChannelEvent(*chanPoint)
3✔
3103

3✔
3104
        return nil
3✔
3105
}
3106

3107
// AbandonChannel removes all channel state from the database except for a
3108
// close summary. This method can be used to get rid of permanently unusable
3109
// channels due to bugs fixed in newer versions of lnd.
3110
func (r *rpcServer) AbandonChannel(_ context.Context,
3111
        in *lnrpc.AbandonChannelRequest) (*lnrpc.AbandonChannelResponse, error) {
3✔
3112

3✔
3113
        // If this isn't the dev build, then we won't allow the RPC to be
3✔
3114
        // executed, as it's an advanced feature and won't be activated in
3✔
3115
        // regular production/release builds except for the explicit case of
3✔
3116
        // externally funded channels that are still pending. Due to repeated
3✔
3117
        // requests, we also allow this requirement to be overridden by a new
3✔
3118
        // flag that attests to the user knowing what they're doing and the risk
3✔
3119
        // associated with the command/RPC.
3✔
3120
        if !in.IKnowWhatIAmDoing && !in.PendingFundingShimOnly &&
3✔
3121
                !build.IsDevBuild() {
3✔
3122

×
3123
                return nil, fmt.Errorf("AbandonChannel RPC call only " +
×
3124
                        "available in dev builds")
×
3125
        }
×
3126

3127
        // We'll parse out the arguments so we can obtain the chanPoint of the
3128
        // target channel.
3129
        txid, err := lnrpc.GetChanPointFundingTxid(in.GetChannelPoint())
3✔
3130
        if err != nil {
3✔
3131
                return nil, err
×
3132
        }
×
3133
        index := in.ChannelPoint.OutputIndex
3✔
3134
        chanPoint := wire.NewOutPoint(txid, index)
3✔
3135

3✔
3136
        // When we remove the channel from the database, we need to set a close
3✔
3137
        // height, so we'll just use the current best known height.
3✔
3138
        _, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
3✔
3139
        if err != nil {
3✔
3140
                return nil, err
×
3141
        }
×
3142

3143
        dbChan, err := r.server.chanStateDB.FetchChannel(*chanPoint)
3✔
3144
        switch {
3✔
3145
        // If the channel isn't found in the set of open channels, then we can
3146
        // continue on as it can't be loaded into the link/peer.
3147
        case err == channeldb.ErrChannelNotFound:
3✔
3148
                break
3✔
3149

3150
        // If the channel is still known to be open, then before we modify any
3151
        // on-disk state, we'll remove the channel from the switch and peer
3152
        // state if it's been loaded in.
3153
        case err == nil:
3✔
3154
                // If the user requested the more safe version that only allows
3✔
3155
                // the removal of externally (shim) funded channels that are
3✔
3156
                // still pending, we enforce this option now that we know the
3✔
3157
                // state of the channel.
3✔
3158
                //
3✔
3159
                // TODO(guggero): Properly store the funding type (wallet, shim,
3✔
3160
                // PSBT) on the channel so we don't need to use the thaw height.
3✔
3161
                isShimFunded := dbChan.ThawHeight > 0
3✔
3162
                isPendingShimFunded := isShimFunded && dbChan.IsPending
3✔
3163
                if !in.IKnowWhatIAmDoing && in.PendingFundingShimOnly &&
3✔
3164
                        !isPendingShimFunded {
3✔
3165

×
3166
                        return nil, fmt.Errorf("channel %v is not externally "+
×
3167
                                "funded or not pending", chanPoint)
×
3168
                }
×
3169

3170
                // We'll mark the channel as borked before we remove the state
3171
                // from the switch/peer so it won't be loaded back in if the
3172
                // peer reconnects.
3173
                if err := dbChan.MarkBorked(); err != nil {
3✔
3174
                        return nil, err
×
3175
                }
×
3176
                remotePub := dbChan.IdentityPub
3✔
3177
                if peer, err := r.server.FindPeer(remotePub); err == nil {
6✔
3178
                        peer.WipeChannel(chanPoint)
3✔
3179
                }
3✔
3180

3181
        default:
×
3182
                return nil, err
×
3183
        }
3184

3185
        // Remove the channel from the graph, database and contract court.
3186
        if err := r.abandonChan(chanPoint, uint32(bestHeight)); err != nil {
3✔
3187
                return nil, err
×
3188
        }
×
3189

3190
        return &lnrpc.AbandonChannelResponse{
3✔
3191
                Status: fmt.Sprintf("channel %v abandoned", chanPoint.String()),
3✔
3192
        }, nil
3✔
3193
}
3194
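
// Illustrative sketch (not part of the original rpcserver.go): the safer
// variant of the request handled above, which only abandons a still-pending,
// externally (shim) funded channel and therefore works outside of dev
// builds.
func exampleAbandonPendingShimChannel(
        cp *lnrpc.ChannelPoint) *lnrpc.AbandonChannelRequest {

        return &lnrpc.AbandonChannelRequest{
                ChannelPoint:           cp,
                PendingFundingShimOnly: true,
        }
}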

3195
// GetInfo returns general information concerning the lightning node including
3196
// its identity pubkey, alias, the chains it is connected to, and information
3197
// concerning the number of open+pending channels.
3198
func (r *rpcServer) GetInfo(_ context.Context,
3199
        _ *lnrpc.GetInfoRequest) (*lnrpc.GetInfoResponse, error) {
3✔
3200

3✔
3201
        serverPeers := r.server.Peers()
3✔
3202

3✔
3203
        openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
3✔
3204
        if err != nil {
3✔
3205
                return nil, err
×
3206
        }
×
3207

3208
        var activeChannels uint32
3✔
3209
        for _, channel := range openChannels {
6✔
3210
                chanID := lnwire.NewChanIDFromOutPoint(channel.FundingOutpoint)
3✔
3211
                if r.server.htlcSwitch.HasActiveLink(chanID) {
6✔
3212
                        activeChannels++
3✔
3213
                }
3✔
3214
        }
3215

3216
        inactiveChannels := uint32(len(openChannels)) - activeChannels
3✔
3217

3✔
3218
        pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
3✔
3219
        if err != nil {
3✔
3220
                return nil, fmt.Errorf("unable to get retrieve pending "+
×
3221
                        "channels: %v", err)
×
3222
        }
×
3223
        nPendingChannels := uint32(len(pendingChannels))
3✔
3224

3✔
3225
        idPub := r.server.identityECDH.PubKey().SerializeCompressed()
3✔
3226
        encodedIDPub := hex.EncodeToString(idPub)
3✔
3227

3✔
3228
        // Get the system's chain sync info.
3✔
3229
        syncInfo, err := r.getChainSyncInfo()
3✔
3230
        if err != nil {
3✔
3231
                return nil, err
×
3232
        }
×
3233

3234
        network := lncfg.NormalizeNetwork(r.cfg.ActiveNetParams.Name)
3✔
3235
        activeChains := []*lnrpc.Chain{
3✔
3236
                {
3✔
3237
                        Chain:   BitcoinChainName,
3✔
3238
                        Network: network,
3✔
3239
                },
3✔
3240
        }
3✔
3241

3✔
3242
        // Check if external IP addresses were provided to lnd and use them
3✔
3243
        // to set the URIs.
3✔
3244
        nodeAnn := r.server.getNodeAnnouncement()
3✔
3245

3✔
3246
        addrs := nodeAnn.Addresses
3✔
3247
        uris := make([]string, len(addrs))
3✔
3248
        for i, addr := range addrs {
6✔
3249
                uris[i] = fmt.Sprintf("%s@%s", encodedIDPub, addr.String())
3✔
3250
        }
3✔
3251

3252
        isGraphSynced := r.server.authGossiper.SyncManager().IsGraphSynced()
3✔
3253

3✔
3254
        features := make(map[uint32]*lnrpc.Feature)
3✔
3255
        sets := r.server.featureMgr.ListSets()
3✔
3256

3✔
3257
        for _, set := range sets {
6✔
3258
                // Get a list of lnrpc features for each set we support.
3✔
3259
                featureVector := r.server.featureMgr.Get(set)
3✔
3260
                rpcFeatures := invoicesrpc.CreateRPCFeatures(featureVector)
3✔
3261

3✔
3262
                // Add the features to our map of features, allowing overwriting of
3✔
3263
                // existing values because features in different sets with the same bit
3✔
3264
                // are duplicated across sets.
3✔
3265
                for bit, feature := range rpcFeatures {
6✔
3266
                        features[bit] = feature
3✔
3267
                }
3✔
3268
        }
3269

3270
        // TODO(roasbeef): add synced height n stuff
3271

3272
        isTestNet := chainreg.IsTestnet(&r.cfg.ActiveNetParams)
3✔
3273
        nodeColor := graph.EncodeHexColor(nodeAnn.RGBColor)
3✔
3274
        version := build.Version() + " commit=" + build.Commit
3✔
3275

3✔
3276
        return &lnrpc.GetInfoResponse{
3✔
3277
                IdentityPubkey:            encodedIDPub,
3✔
3278
                NumPendingChannels:        nPendingChannels,
3✔
3279
                NumActiveChannels:         activeChannels,
3✔
3280
                NumInactiveChannels:       inactiveChannels,
3✔
3281
                NumPeers:                  uint32(len(serverPeers)),
3✔
3282
                BlockHeight:               uint32(syncInfo.bestHeight),
3✔
3283
                BlockHash:                 syncInfo.blockHash.String(),
3✔
3284
                SyncedToChain:             syncInfo.isSynced,
3✔
3285
                Testnet:                   isTestNet,
3✔
3286
                Chains:                    activeChains,
3✔
3287
                Uris:                      uris,
3✔
3288
                Alias:                     nodeAnn.Alias.String(),
3✔
3289
                Color:                     nodeColor,
3✔
3290
                BestHeaderTimestamp:       syncInfo.timestamp,
3✔
3291
                Version:                   version,
3✔
3292
                CommitHash:                build.CommitHash,
3✔
3293
                SyncedToGraph:             isGraphSynced,
3✔
3294
                Features:                  features,
3✔
3295
                RequireHtlcInterceptor:    r.cfg.RequireInterceptor,
3✔
3296
                StoreFinalHtlcResolutions: r.cfg.StoreFinalHtlcResolutions,
3✔
3297
        }, nil
3✔
3298
}
3299
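
// Illustrative sketch (not part of the original rpcserver.go): a caller
// waiting on the two sync flags returned by GetInfo before issuing
// payment-related RPCs. The helper name is hypothetical.
func exampleIsFullySynced(ctx context.Context,
        client lnrpc.LightningClient) (bool, error) {

        info, err := client.GetInfo(ctx, &lnrpc.GetInfoRequest{})
        if err != nil {
                return false, err
        }

        return info.SyncedToChain && info.SyncedToGraph, nil
}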

3300
// GetDebugInfo returns debug information concerning the state of the daemon
3301
// and its subsystems. This includes the full configuration and the latest log
3302
// entries from the log file.
3303
func (r *rpcServer) GetDebugInfo(_ context.Context,
3304
        _ *lnrpc.GetDebugInfoRequest) (*lnrpc.GetDebugInfoResponse, error) {
×
3305

×
3306
        flatConfig, _, err := configToFlatMap(*r.cfg)
×
3307
        if err != nil {
×
3308
                return nil, fmt.Errorf("error converting config to flat map: "+
×
3309
                        "%w", err)
×
3310
        }
×
3311

3312
        logFileName := filepath.Join(r.cfg.LogDir, defaultLogFilename)
×
3313
        logContent, err := os.ReadFile(logFileName)
×
3314
        if err != nil {
×
3315
                return nil, fmt.Errorf("error reading log file '%s': %w",
×
3316
                        logFileName, err)
×
3317
        }
×
3318

3319
        return &lnrpc.GetDebugInfoResponse{
×
3320
                Config: flatConfig,
×
3321
                Log:    strings.Split(string(logContent), "\n"),
×
3322
        }, nil
×
3323
}
3324

3325
// GetRecoveryInfo returns a boolean indicating whether the wallet is started
3326
// in recovery mode, whether the recovery is finished, and the progress made
3327
// so far.
3328
func (r *rpcServer) GetRecoveryInfo(ctx context.Context,
3329
        in *lnrpc.GetRecoveryInfoRequest) (*lnrpc.GetRecoveryInfoResponse, error) {
3✔
3330

3✔
3331
        isRecoveryMode, progress, err := r.server.cc.Wallet.GetRecoveryInfo()
3✔
3332
        if err != nil {
3✔
3333
                return nil, fmt.Errorf("unable to get wallet recovery info: %w",
×
3334
                        err)
×
3335
        }
×
3336

3337
        rpcsLog.Debugf("[getrecoveryinfo] is recovery mode=%v, progress=%v",
3✔
3338
                isRecoveryMode, progress)
3✔
3339

3✔
3340
        return &lnrpc.GetRecoveryInfoResponse{
3✔
3341
                RecoveryMode:     isRecoveryMode,
3✔
3342
                RecoveryFinished: progress == 1,
3✔
3343
                Progress:         progress,
3✔
3344
        }, nil
3✔
3345
}
3346
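
// Illustrative sketch (not part of the original rpcserver.go): interpreting
// the recovery progress reported above. Progress is a fraction in [0, 1]
// and RecoveryFinished only flips to true once it reaches exactly 1.
func exampleRecoveryPercent(resp *lnrpc.GetRecoveryInfoResponse) float64 {
        if !resp.RecoveryMode {
                // The wallet wasn't started in recovery mode, so there is no
                // progress to report.
                return 0
        }

        return resp.Progress * 100
}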

3347
// ListPeers returns a verbose listing of all currently active peers.
3348
func (r *rpcServer) ListPeers(ctx context.Context,
3349
        in *lnrpc.ListPeersRequest) (*lnrpc.ListPeersResponse, error) {
3✔
3350

3✔
3351
        serverPeers := r.server.Peers()
3✔
3352
        resp := &lnrpc.ListPeersResponse{
3✔
3353
                Peers: make([]*lnrpc.Peer, 0, len(serverPeers)),
3✔
3354
        }
3✔
3355

3✔
3356
        for _, serverPeer := range serverPeers {
6✔
3357
                var (
3✔
3358
                        satSent int64
3✔
3359
                        satRecv int64
3✔
3360
                )
3✔
3361

3✔
3362
                // In order to display the total number of outbound (sent)
3✔
3363
                // and inbound (recv'd) satoshis that have been
3✔
3364
                // transported through this peer, we'll sum up the sent/recv'd
3✔
3365
                // values for each of the active channels we have with the
3✔
3366
                // peer.
3✔
3367
                chans := serverPeer.ChannelSnapshots()
3✔
3368
                for _, c := range chans {
6✔
3369
                        satSent += int64(c.TotalMSatSent.ToSatoshis())
3✔
3370
                        satRecv += int64(c.TotalMSatReceived.ToSatoshis())
3✔
3371
                }
3✔
3372

3373
                nodePub := serverPeer.PubKey()
3✔
3374

3✔
3375
                // Retrieve the peer's sync type. If we don't currently have a
3✔
3376
                // syncer for the peer, then we'll default to a passive sync.
3✔
3377
                // This can happen if the RPC is called while a peer is
3✔
3378
                // initializing.
3✔
3379
                syncer, ok := r.server.authGossiper.SyncManager().GossipSyncer(
3✔
3380
                        nodePub,
3✔
3381
                )
3✔
3382

3✔
3383
                var lnrpcSyncType lnrpc.Peer_SyncType
3✔
3384
                if !ok {
4✔
3385
                        rpcsLog.Warnf("Gossip syncer for peer=%x not found",
1✔
3386
                                nodePub)
1✔
3387
                        lnrpcSyncType = lnrpc.Peer_UNKNOWN_SYNC
1✔
3388
                } else {
4✔
3389
                        syncType := syncer.SyncType()
3✔
3390
                        switch syncType {
3✔
3391
                        case discovery.ActiveSync:
3✔
3392
                                lnrpcSyncType = lnrpc.Peer_ACTIVE_SYNC
3✔
3393
                        case discovery.PassiveSync:
3✔
3394
                                lnrpcSyncType = lnrpc.Peer_PASSIVE_SYNC
3✔
3395
                        case discovery.PinnedSync:
3✔
3396
                                lnrpcSyncType = lnrpc.Peer_PINNED_SYNC
3✔
3397
                        default:
×
3398
                                return nil, fmt.Errorf("unhandled sync type %v",
×
3399
                                        syncType)
×
3400
                        }
3401
                }
3402

3403
                features := invoicesrpc.CreateRPCFeatures(
3✔
3404
                        serverPeer.RemoteFeatures(),
3✔
3405
                )
3✔
3406

3✔
3407
                rpcPeer := &lnrpc.Peer{
3✔
3408
                        PubKey:          hex.EncodeToString(nodePub[:]),
3✔
3409
                        Address:         serverPeer.Conn().RemoteAddr().String(),
3✔
3410
                        Inbound:         serverPeer.Inbound(),
3✔
3411
                        BytesRecv:       serverPeer.BytesReceived(),
3✔
3412
                        BytesSent:       serverPeer.BytesSent(),
3✔
3413
                        SatSent:         satSent,
3✔
3414
                        SatRecv:         satRecv,
3✔
3415
                        PingTime:        serverPeer.PingTime(),
3✔
3416
                        SyncType:        lnrpcSyncType,
3✔
3417
                        Features:        features,
3✔
3418
                        LastPingPayload: serverPeer.LastRemotePingPayload(),
3✔
3419
                }
3✔
3420

3✔
3421
                var peerErrors []interface{}
3✔
3422

3✔
3423
                // If we only want the most recent error, get the most recent
3✔
3424
                // error from the buffer and add it to our list of errors if
3✔
3425
                // it is non-nil. If we want all the stored errors, simply
3✔
3426
                // add the full list to our set of errors.
3✔
3427
                if in.LatestError {
3✔
3428
                        latestErr := serverPeer.ErrorBuffer().Latest()
×
3429
                        if latestErr != nil {
×
3430
                                peerErrors = []interface{}{latestErr}
×
3431
                        }
×
3432
                } else {
3✔
3433
                        peerErrors = serverPeer.ErrorBuffer().List()
3✔
3434
                }
3✔
3435

3436
                // Add the relevant peer errors to our response.
3437
                for _, error := range peerErrors {
6✔
3438
                        tsError := error.(*peer.TimestampedError)
3✔
3439

3✔
3440
                        rpcErr := &lnrpc.TimestampedError{
3✔
3441
                                Timestamp: uint64(tsError.Timestamp.Unix()),
3✔
3442
                                Error:     tsError.Error.Error(),
3✔
3443
                        }
3✔
3444

3✔
3445
                        rpcPeer.Errors = append(rpcPeer.Errors, rpcErr)
3✔
3446
                }
3✔
3447

3448
                // If the server has started, we can query the event store
3449
                // for our peer's flap count. If we do so when the server has
3450
                // not started, the request will block.
3451
                if r.server.Started() {
6✔
3452
                        vertex, err := route.NewVertexFromBytes(nodePub[:])
3✔
3453
                        if err != nil {
3✔
3454
                                return nil, err
×
3455
                        }
×
3456

3457
                        flap, ts, err := r.server.chanEventStore.FlapCount(
3✔
3458
                                vertex,
3✔
3459
                        )
3✔
3460
                        if err != nil {
3✔
3461
                                return nil, err
×
3462
                        }
×
3463

3464
                        // If our timestamp is non-nil, we have values for our
3465
                        // peer's flap count, so we set them.
3466
                        if ts != nil {
6✔
3467
                                rpcPeer.FlapCount = int32(flap)
3✔
3468
                                rpcPeer.LastFlapNs = ts.UnixNano()
3✔
3469
                        }
3✔
3470
                }
3471

3472
                resp.Peers = append(resp.Peers, rpcPeer)
3✔
3473
        }
3474

3475
        rpcsLog.Debugf("[listpeers] yielded %v peers", serverPeers)
3✔
3476

3✔
3477
        return resp, nil
3✔
3478
}
3479
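
// Illustrative sketch (not part of the original rpcserver.go): a caller
// logging the gossip sync type and flap count of every connected peer from
// the response assembled above.
func exampleLogPeerSyncTypes(ctx context.Context,
        client lnrpc.LightningClient) error {

        resp, err := client.ListPeers(ctx, &lnrpc.ListPeersRequest{
                // Only include the most recent error per peer.
                LatestError: true,
        })
        if err != nil {
                return err
        }

        for _, p := range resp.Peers {
                rpcsLog.Infof("peer %v sync_type=%v flap_count=%v",
                        p.PubKey, p.SyncType, p.FlapCount)
        }

        return nil
}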

3480
// SubscribePeerEvents returns a uni-directional stream (server -> client)
3481
// for notifying the client of peer online and offline events.
3482
func (r *rpcServer) SubscribePeerEvents(req *lnrpc.PeerEventSubscription,
3483
        eventStream lnrpc.Lightning_SubscribePeerEventsServer) error {
3✔
3484

3✔
3485
        peerEventSub, err := r.server.peerNotifier.SubscribePeerEvents()
3✔
3486
        if err != nil {
3✔
3487
                return err
×
3488
        }
×
3489
        defer peerEventSub.Cancel()
3✔
3490

3✔
3491
        for {
6✔
3492
                select {
3✔
3493
                // A new update has been sent by the peer notifier, we'll
3494
                // marshal it into the form expected by the gRPC client, then
3495
                // send it off to the client.
3496
                case e := <-peerEventSub.Updates():
3✔
3497
                        var event *lnrpc.PeerEvent
3✔
3498

3✔
3499
                        switch peerEvent := e.(type) {
3✔
3500
                        case peernotifier.PeerOfflineEvent:
3✔
3501
                                event = &lnrpc.PeerEvent{
3✔
3502
                                        PubKey: hex.EncodeToString(peerEvent.PubKey[:]),
3✔
3503
                                        Type:   lnrpc.PeerEvent_PEER_OFFLINE,
3✔
3504
                                }
3✔
3505

3506
                        case peernotifier.PeerOnlineEvent:
×
3507
                                event = &lnrpc.PeerEvent{
×
3508
                                        PubKey: hex.EncodeToString(peerEvent.PubKey[:]),
×
3509
                                        Type:   lnrpc.PeerEvent_PEER_ONLINE,
×
3510
                                }
×
3511

3512
                        default:
×
3513
                                return fmt.Errorf("unexpected peer event: %v", event)
×
3514
                        }
3515

3516
                        if err := eventStream.Send(event); err != nil {
3✔
3517
                                return err
×
3518
                        }
×
3519

3520
                // The response stream's context for whatever reason has been
3521
                // closed. If the context was closed by an exceeded deadline we will
3522
                // return an error.
3523
                case <-eventStream.Context().Done():
3✔
3524
                        if errors.Is(eventStream.Context().Err(), context.Canceled) {
6✔
3525
                                return nil
3✔
3526
                        }
3✔
3527
                        return eventStream.Context().Err()
×
3528

3529
                case <-r.quit:
×
3530
                        return nil
×
3531
                }
3532
        }
3533
}
3534
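
// Illustrative sketch (not part of the original rpcserver.go): a client
// draining the peer event stream served above until the stream or its
// context is torn down.
func exampleWatchPeerEvents(ctx context.Context,
        client lnrpc.LightningClient) error {

        stream, err := client.SubscribePeerEvents(
                ctx, &lnrpc.PeerEventSubscription{},
        )
        if err != nil {
                return err
        }

        for {
                event, err := stream.Recv()
                if err != nil {
                        // Recv fails once the context is cancelled or the
                        // server shuts down.
                        return err
                }

                rpcsLog.Debugf("peer %v is now %v", event.PubKey, event.Type)
        }
}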

3535
// WalletBalance returns total unspent outputs(confirmed and unconfirmed), all
3536
// confirmed unspent outputs and all unconfirmed unspent outputs under control
3537
// by the wallet. This method can be modified by having the request specify
3538
// only witness outputs should be factored into the final output sum.
3539
// TODO(roasbeef): add async hooks into wallet balance changes.
3540
func (r *rpcServer) WalletBalance(ctx context.Context,
3541
        in *lnrpc.WalletBalanceRequest) (*lnrpc.WalletBalanceResponse, error) {
3✔
3542

3✔
3543
        // Retrieve all existing wallet accounts. We'll compute the confirmed
3✔
3544
        // and unconfirmed balance for each and tally them up.
3✔
3545
        accounts, err := r.server.cc.Wallet.ListAccounts(in.Account, nil)
3✔
3546
        if err != nil {
3✔
3547
                return nil, err
×
3548
        }
×
3549

3550
        var totalBalance, confirmedBalance, unconfirmedBalance btcutil.Amount
3✔
3551
        rpcAccountBalances := make(
3✔
3552
                map[string]*lnrpc.WalletAccountBalance, len(accounts),
3✔
3553
        )
3✔
3554
        for _, account := range accounts {
6✔
3555
                // There are two default accounts, one for NP2WKH outputs and
3✔
3556
                // another for P2WKH outputs. The balance will be computed for
3✔
3557
                // both given one call to ConfirmedBalance with the default
3✔
3558
                // wallet and imported account, so we'll skip the second
3✔
3559
                // instance to avoid inflating the balance.
3✔
3560
                switch account.AccountName {
3✔
3561
                case waddrmgr.ImportedAddrAccountName:
3✔
3562
                        // Omit the imported account from the response unless we
3✔
3563
                        // actually have any keys imported.
3✔
3564
                        if account.ImportedKeyCount == 0 {
6✔
3565
                                continue
3✔
3566
                        }
3567

3568
                        fallthrough
3✔
3569

3570
                case lnwallet.DefaultAccountName:
3✔
3571
                        if _, ok := rpcAccountBalances[account.AccountName]; ok {
6✔
3572
                                continue
3✔
3573
                        }
3574

3575
                default:
3✔
3576
                }
3577

3578
                // There are now also accounts for the internal channel
3579
                // related keys. We skip those as they'll never have any direct
3580
                // balance.
3581
                if account.KeyScope.Purpose == keychain.BIP0043Purpose {
6✔
3582
                        continue
3✔
3583
                }
3584

3585
                // Get total balance, from txs that have >= 0 confirmations.
3586
                totalBal, err := r.server.cc.Wallet.ConfirmedBalance(
3✔
3587
                        0, account.AccountName,
3✔
3588
                )
3✔
3589
                if err != nil {
3✔
3590
                        return nil, err
×
3591
                }
×
3592
                totalBalance += totalBal
3✔
3593

3✔
3594
                // Get confirmed balance, from txs that have >= 1 confirmations.
3✔
3595
                // TODO(halseth): get both unconfirmed and confirmed balance in
3✔
3596
                // one call, as this is racy.
3✔
3597
                if in.MinConfs <= 0 {
6✔
3598
                        in.MinConfs = 1
3✔
3599
                }
3✔
3600
                confirmedBal, err := r.server.cc.Wallet.ConfirmedBalance(
3✔
3601
                        in.MinConfs, account.AccountName,
3✔
3602
                )
3✔
3603
                if err != nil {
3✔
3604
                        return nil, err
×
3605
                }
×
3606
                confirmedBalance += confirmedBal
3✔
3607

3✔
3608
                // Get unconfirmed balance, from txs with 0 confirmations.
3✔
3609
                unconfirmedBal := totalBal - confirmedBal
3✔
3610
                unconfirmedBalance += unconfirmedBal
3✔
3611

3✔
3612
                rpcAccountBalances[account.AccountName] = &lnrpc.WalletAccountBalance{
3✔
3613
                        ConfirmedBalance:   int64(confirmedBal),
3✔
3614
                        UnconfirmedBalance: int64(unconfirmedBal),
3✔
3615
                }
3✔
3616
        }
3617

3618
        // Now that we have the base balance accounted for with each account,
3619
        // we'll look at the set of locked UTXOs to tally that as well. If we
3620
        // don't display this, then anytime we attempt a funding reservation,
3621
        // the outputs will show as being "gone" until they're confirmed on
3622
        // chain.
3623
        var lockedBalance btcutil.Amount
3✔
3624
        leases, err := r.server.cc.Wallet.ListLeasedOutputs()
3✔
3625
        if err != nil {
3✔
3626
                return nil, err
×
3627
        }
×
3628
        for _, leasedOutput := range leases {
6✔
3629
                lockedBalance += btcutil.Amount(leasedOutput.Value)
3✔
3630
        }
3✔
3631

3632
        // Get the current number of non-private anchor channels.
3633
        currentNumAnchorChans, err := r.server.cc.Wallet.CurrentNumAnchorChans()
3✔
3634
        if err != nil {
3✔
3635
                return nil, err
×
3636
        }
×
3637

3638
        // Get the required reserve for the wallet.
3639
        requiredReserve := r.server.cc.Wallet.RequiredReserve(
3✔
3640
                uint32(currentNumAnchorChans),
3✔
3641
        )
3✔
3642

3✔
3643
        rpcsLog.Debugf("[walletbalance] Total balance=%v (confirmed=%v, "+
3✔
3644
                "unconfirmed=%v)", totalBalance, confirmedBalance,
3✔
3645
                unconfirmedBalance)
3✔
3646

3✔
3647
        return &lnrpc.WalletBalanceResponse{
3✔
3648
                TotalBalance:              int64(totalBalance),
3✔
3649
                ConfirmedBalance:          int64(confirmedBalance),
3✔
3650
                UnconfirmedBalance:        int64(unconfirmedBalance),
3✔
3651
                LockedBalance:             int64(lockedBalance),
3✔
3652
                ReservedBalanceAnchorChan: int64(requiredReserve),
3✔
3653
                AccountBalance:            rpcAccountBalances,
3✔
3654
        }, nil
3✔
3655
}
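// --- Illustrative sketch (not part of rpcserver.go) ------------------------
// The comments above spell out the accounting: unconfirmed is simply the
// total (>=0 confs) minus the confirmed (>=MinConfs) balance, while locked
// and anchor-reserve amounts are reported separately. A minimal client-side
// sketch of reading those fields, assuming a connected lnrpc.LightningClient
// obtained elsewhere; the helper name is hypothetical:
func printWalletBalance(ctx context.Context,
        client lnrpc.LightningClient) error {

        resp, err := client.WalletBalance(ctx, &lnrpc.WalletBalanceRequest{})
        if err != nil {
                return err
        }

        // Mirrors the per-account arithmetic above: unconfirmed is the
        // difference between the total and confirmed balances.
        unconfirmed := resp.TotalBalance - resp.ConfirmedBalance

        fmt.Printf("total=%d confirmed=%d unconfirmed=%d locked=%d "+
                "anchor_reserve=%d\n", resp.TotalBalance,
                resp.ConfirmedBalance, unconfirmed, resp.LockedBalance,
                resp.ReservedBalanceAnchorChan)

        return nil
}
// ----------------------------------------------------------------------------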
3656

3657
// ChannelBalance returns the total available channel flow across all open
3658
// channels in satoshis.
3659
func (r *rpcServer) ChannelBalance(ctx context.Context,
3660
        in *lnrpc.ChannelBalanceRequest) (
3661
        *lnrpc.ChannelBalanceResponse, error) {
3✔
3662

3✔
3663
        var (
3✔
3664
                localBalance             lnwire.MilliSatoshi
3✔
3665
                remoteBalance            lnwire.MilliSatoshi
3✔
3666
                unsettledLocalBalance    lnwire.MilliSatoshi
3✔
3667
                unsettledRemoteBalance   lnwire.MilliSatoshi
3✔
3668
                pendingOpenLocalBalance  lnwire.MilliSatoshi
3✔
3669
                pendingOpenRemoteBalance lnwire.MilliSatoshi
3✔
3670
                customDataBuf            bytes.Buffer
3✔
3671
        )
3✔
3672

3✔
3673
        openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
3✔
3674
        if err != nil {
3✔
3675
                return nil, err
×
3676
        }
×
3677

3678
        // Encode the number of open channels to the custom data buffer.
3679
        err = wire.WriteVarInt(&customDataBuf, 0, uint64(len(openChannels)))
3✔
3680
        if err != nil {
3✔
3681
                return nil, err
×
3682
        }
×
3683

3684
        for _, channel := range openChannels {
6✔
3685
                c := channel.LocalCommitment
3✔
3686
                localBalance += c.LocalBalance
3✔
3687
                remoteBalance += c.RemoteBalance
3✔
3688

3✔
3689
                // Add pending htlc amount.
3✔
3690
                for _, htlc := range c.Htlcs {
6✔
3691
                        if htlc.Incoming {
6✔
3692
                                unsettledLocalBalance += htlc.Amt
3✔
3693
                        } else {
6✔
3694
                                unsettledRemoteBalance += htlc.Amt
3✔
3695
                        }
3✔
3696
                }
3697

3698
                // Encode the custom data for this open channel.
3699
                openChanData := channel.LocalCommitment.CustomBlob.UnwrapOr(nil)
3✔
3700
                err = wire.WriteVarBytes(&customDataBuf, 0, openChanData)
3✔
3701
                if err != nil {
3✔
3702
                        return nil, err
×
3703
                }
×
3704
        }
3705

3706
        pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
3✔
3707
        if err != nil {
3✔
3708
                return nil, err
×
3709
        }
×
3710

3711
        // Encode the number of pending channels to the custom data buffer.
3712
        err = wire.WriteVarInt(&customDataBuf, 0, uint64(len(pendingChannels)))
3✔
3713
        if err != nil {
3✔
3714
                return nil, err
×
3715
        }
×
3716

3717
        for _, channel := range pendingChannels {
6✔
3718
                c := channel.LocalCommitment
3✔
3719
                pendingOpenLocalBalance += c.LocalBalance
3✔
3720
                pendingOpenRemoteBalance += c.RemoteBalance
3✔
3721

3✔
3722
                // Encode the custom data for this pending channel.
3✔
3723
                openChanData := channel.LocalCommitment.CustomBlob.UnwrapOr(nil)
3✔
3724
                err = wire.WriteVarBytes(&customDataBuf, 0, openChanData)
3✔
3725
                if err != nil {
3✔
3726
                        return nil, err
×
3727
                }
×
3728
        }
3729

3730
        rpcsLog.Debugf("[channelbalance] local_balance=%v remote_balance=%v "+
3✔
3731
                "unsettled_local_balance=%v unsettled_remote_balance=%v "+
3✔
3732
                "pending_open_local_balance=%v pending_open_remote_balance=%v",
3✔
3733
                localBalance, remoteBalance, unsettledLocalBalance,
3✔
3734
                unsettledRemoteBalance, pendingOpenLocalBalance,
3✔
3735
                pendingOpenRemoteBalance)
3✔
3736

3✔
3737
        resp := &lnrpc.ChannelBalanceResponse{
3✔
3738
                LocalBalance: &lnrpc.Amount{
3✔
3739
                        Sat:  uint64(localBalance.ToSatoshis()),
3✔
3740
                        Msat: uint64(localBalance),
3✔
3741
                },
3✔
3742
                RemoteBalance: &lnrpc.Amount{
3✔
3743
                        Sat:  uint64(remoteBalance.ToSatoshis()),
3✔
3744
                        Msat: uint64(remoteBalance),
3✔
3745
                },
3✔
3746
                UnsettledLocalBalance: &lnrpc.Amount{
3✔
3747
                        Sat:  uint64(unsettledLocalBalance.ToSatoshis()),
3✔
3748
                        Msat: uint64(unsettledLocalBalance),
3✔
3749
                },
3✔
3750
                UnsettledRemoteBalance: &lnrpc.Amount{
3✔
3751
                        Sat:  uint64(unsettledRemoteBalance.ToSatoshis()),
3✔
3752
                        Msat: uint64(unsettledRemoteBalance),
3✔
3753
                },
3✔
3754
                PendingOpenLocalBalance: &lnrpc.Amount{
3✔
3755
                        Sat:  uint64(pendingOpenLocalBalance.ToSatoshis()),
3✔
3756
                        Msat: uint64(pendingOpenLocalBalance),
3✔
3757
                },
3✔
3758
                PendingOpenRemoteBalance: &lnrpc.Amount{
3✔
3759
                        Sat:  uint64(pendingOpenRemoteBalance.ToSatoshis()),
3✔
3760
                        Msat: uint64(pendingOpenRemoteBalance),
3✔
3761
                },
3✔
3762
                CustomChannelData: customDataBuf.Bytes(),
3✔
3763

3✔
3764
                // Deprecated fields.
3✔
3765
                Balance:            int64(localBalance.ToSatoshis()),
3✔
3766
                PendingOpenBalance: int64(pendingOpenLocalBalance.ToSatoshis()),
3✔
3767
        }
3✔
3768

3✔
3769
        err = fn.MapOptionZ(
3✔
3770
                r.server.implCfg.AuxDataParser,
3✔
3771
                func(parser AuxDataParser) error {
3✔
3772
                        return parser.InlineParseCustomData(resp)
×
3773
                },
×
3774
        )
3775
        if err != nil {
3✔
3776
                return nil, fmt.Errorf("error parsing custom data: %w", err)
×
3777
        }
×
3778

3779
        return resp, nil
3✔
3780
}
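// --- Illustrative sketch (not part of rpcserver.go) ------------------------
// ChannelBalance frames CustomChannelData as: a varint count of open channels
// followed by one var-bytes blob per channel, then the same pattern again for
// pending channels. A sketch of walking that framing on the consumer side;
// the helper name is hypothetical:
func decodeChannelBalanceCustomData(data []byte) (open, pending [][]byte,
        err error) {

        r := bytes.NewReader(data)

        // readSection mirrors the WriteVarInt/WriteVarBytes calls above: a
        // count followed by that many var-bytes blobs.
        readSection := func() ([][]byte, error) {
                count, err := wire.ReadVarInt(r, 0)
                if err != nil {
                        return nil, err
                }

                blobs := make([][]byte, 0, int(count))
                for i := uint64(0); i < count; i++ {
                        blob, err := wire.ReadVarBytes(
                                r, 0, math.MaxUint32, "chan data",
                        )
                        if err != nil {
                                return nil, err
                        }
                        blobs = append(blobs, blob)
                }

                return blobs, nil
        }

        if open, err = readSection(); err != nil {
                return nil, nil, err
        }
        if pending, err = readSection(); err != nil {
                return nil, nil, err
        }

        return open, pending, nil
}
// ----------------------------------------------------------------------------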
3781

3782
type (
3783
        pendingOpenChannels  []*lnrpc.PendingChannelsResponse_PendingOpenChannel
3784
        pendingForceClose    []*lnrpc.PendingChannelsResponse_ForceClosedChannel
3785
        waitingCloseChannels []*lnrpc.PendingChannelsResponse_WaitingCloseChannel
3786
)
3787

3788
// fetchPendingOpenChannels queries the database for a list of channels that
3789
// have pending open state. The returned result is used in the response of the
3790
// PendingChannels RPC.
3791
func (r *rpcServer) fetchPendingOpenChannels() (pendingOpenChannels, error) {
3✔
3792
        // First, we'll populate the response with all the channels that are
3✔
3793
        // soon to be opened. We can easily fetch this data from the database
3✔
3794
        // and map the db struct to the proto response.
3✔
3795
        channels, err := r.server.chanStateDB.FetchPendingChannels()
3✔
3796
        if err != nil {
3✔
3797
                rpcsLog.Errorf("unable to fetch pending channels: %v", err)
×
3798
                return nil, err
×
3799
        }
×
3800

3801
        _, currentHeight, err := r.server.cc.ChainIO.GetBestBlock()
3✔
3802
        if err != nil {
3✔
3803
                return nil, err
×
3804
        }
×
3805

3806
        result := make(pendingOpenChannels, len(channels))
3✔
3807
        for i, pendingChan := range channels {
6✔
3808
                pub := pendingChan.IdentityPub.SerializeCompressed()
3✔
3809

3✔
3810
                // As this is required for display purposes, we'll calculate
3✔
3811
                // the weight of the commitment transaction. We also add on the
3✔
3812
                // estimated weight of the witness to calculate the weight of
3✔
3813
                // the transaction if it were to be immediately unilaterally
3✔
3814
                // broadcast.
3✔
3815
                // TODO(roasbeef): query for funding tx from wallet, display
3✔
3816
                // that also?
3✔
3817
                var witnessWeight int64
3✔
3818
                if pendingChan.ChanType.IsTaproot() {
6✔
3819
                        witnessWeight = input.TaprootKeyPathWitnessSize
3✔
3820
                } else {
6✔
3821
                        witnessWeight = input.WitnessCommitmentTxWeight
3✔
3822
                }
3✔
3823

3824
                localCommitment := pendingChan.LocalCommitment
3✔
3825
                utx := btcutil.NewTx(localCommitment.CommitTx)
3✔
3826
                commitBaseWeight := blockchain.GetTransactionWeight(utx)
3✔
3827
                commitWeight := commitBaseWeight + witnessWeight
3✔
3828

3✔
3829
                // FundingExpiryBlocks is the distance from the current block
3✔
3830
                // height to the broadcast height + MaxWaitNumBlocksFundingConf.
3✔
3831
                maxFundingHeight := funding.MaxWaitNumBlocksFundingConf +
3✔
3832
                        pendingChan.BroadcastHeight()
3✔
3833
                fundingExpiryBlocks := int32(maxFundingHeight) - currentHeight
3✔
3834

3✔
3835
                customChanBytes, err := encodeCustomChanData(pendingChan)
3✔
3836
                if err != nil {
3✔
3837
                        return nil, fmt.Errorf("unable to encode open chan "+
×
3838
                                "data: %w", err)
×
3839
                }
×
3840

3841
                result[i] = &lnrpc.PendingChannelsResponse_PendingOpenChannel{
3✔
3842
                        Channel: &lnrpc.PendingChannelsResponse_PendingChannel{
3✔
3843
                                RemoteNodePub:        hex.EncodeToString(pub),
3✔
3844
                                ChannelPoint:         pendingChan.FundingOutpoint.String(),
3✔
3845
                                Capacity:             int64(pendingChan.Capacity),
3✔
3846
                                LocalBalance:         int64(localCommitment.LocalBalance.ToSatoshis()),
3✔
3847
                                RemoteBalance:        int64(localCommitment.RemoteBalance.ToSatoshis()),
3✔
3848
                                LocalChanReserveSat:  int64(pendingChan.LocalChanCfg.ChanReserve),
3✔
3849
                                RemoteChanReserveSat: int64(pendingChan.RemoteChanCfg.ChanReserve),
3✔
3850
                                Initiator:            rpcInitiator(pendingChan.IsInitiator),
3✔
3851
                                CommitmentType:       rpcCommitmentType(pendingChan.ChanType),
3✔
3852
                                Private:              isPrivate(pendingChan),
3✔
3853
                                Memo:                 string(pendingChan.Memo),
3✔
3854
                                CustomChannelData:    customChanBytes,
3✔
3855
                        },
3✔
3856
                        CommitWeight:        commitWeight,
3✔
3857
                        CommitFee:           int64(localCommitment.CommitFee),
3✔
3858
                        FeePerKw:            int64(localCommitment.FeePerKw),
3✔
3859
                        FundingExpiryBlocks: fundingExpiryBlocks,
3✔
3860
                        // TODO(roasbeef): need to track confirmation height
3✔
3861
                }
3✔
3862
        }
3863

3864
        return result, nil
3✔
3865
}
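// --- Illustrative sketch (not part of rpcserver.go) ------------------------
// Two calculations from the loop above, restated in isolation. The commit
// weight is the stored commitment transaction's weight plus an estimated
// witness weight that depends on the channel type, and FundingExpiryBlocks
// counts down from the broadcast height plus the confirmation window. The
// helper names are hypothetical.
func exampleCommitWeight(commitTx *wire.MsgTx, taproot bool) int64 {
        witnessWeight := int64(input.WitnessCommitmentTxWeight)
        if taproot {
                witnessWeight = int64(input.TaprootKeyPathWitnessSize)
        }

        utx := btcutil.NewTx(commitTx)

        return blockchain.GetTransactionWeight(utx) + witnessWeight
}

// A value <= 0 means the funding flow is past the wait window defined by
// funding.MaxWaitNumBlocksFundingConf.
func exampleFundingExpiry(broadcastHeight uint32, currentHeight int32) int32 {
        maxFundingHeight := funding.MaxWaitNumBlocksFundingConf +
                broadcastHeight

        return int32(maxFundingHeight) - currentHeight
}
// ----------------------------------------------------------------------------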
3866

3867
// fetchPendingForceCloseChannels queries the database for a list of channels
3868
// that have their closing transactions confirmed but not fully resolved yet.
3869
// The returned result is used in the response of the PendingChannels RPC.
3870
func (r *rpcServer) fetchPendingForceCloseChannels() (pendingForceClose,
3871
        int64, error) {
3✔
3872

3✔
3873
        _, currentHeight, err := r.server.cc.ChainIO.GetBestBlock()
3✔
3874
        if err != nil {
3✔
3875
                return nil, 0, err
×
3876
        }
×
3877

3878
        // Next, we'll examine the channels that are soon to be closed so we
3879
        // can populate these fields within the response.
3880
        channels, err := r.server.chanStateDB.FetchClosedChannels(true)
3✔
3881
        if err != nil {
3✔
3882
                rpcsLog.Errorf("unable to fetch closed channels: %v", err)
×
3883
                return nil, 0, err
×
3884
        }
×
3885

3886
        result := make(pendingForceClose, 0)
3✔
3887
        limboBalance := int64(0)
3✔
3888

3✔
3889
        for _, pendingClose := range channels {
6✔
3890
                // First construct the channel struct itself, this will be
3✔
3891
                // needed regardless of how this channel was closed.
3✔
3892
                pub := pendingClose.RemotePub.SerializeCompressed()
3✔
3893
                chanPoint := pendingClose.ChanPoint
3✔
3894

3✔
3895
                // Create the pending channel. If this channel was closed before
3✔
3896
                // we started storing historical channel data, we will not know
3✔
3897
                // who initiated the channel, so we set the initiator field to
3✔
3898
                // unknown.
3✔
3899
                channel := &lnrpc.PendingChannelsResponse_PendingChannel{
3✔
3900
                        RemoteNodePub:  hex.EncodeToString(pub),
3✔
3901
                        ChannelPoint:   chanPoint.String(),
3✔
3902
                        Capacity:       int64(pendingClose.Capacity),
3✔
3903
                        LocalBalance:   int64(pendingClose.SettledBalance),
3✔
3904
                        CommitmentType: lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE,
3✔
3905
                        Initiator:      lnrpc.Initiator_INITIATOR_UNKNOWN,
3✔
3906
                }
3✔
3907

3✔
3908
                // Look up the channel in the historical channel bucket to
3✔
3909
                // obtain initiator information. If the historical channel
3✔
3910
                // bucket, or the channel itself, was not found, then this
3✔
3911
                // channel was closed in a version before we started persisting
3✔
3912
                // historical channels, so we silence the error.
3✔
3913
                historical, err := r.server.chanStateDB.FetchHistoricalChannel(
3✔
3914
                        &pendingClose.ChanPoint,
3✔
3915
                )
3✔
3916
                switch err {
3✔
3917
                // If the channel was closed in a version that did not record
3918
                // historical channels, ignore the error.
3919
                case channeldb.ErrNoHistoricalBucket:
×
3920
                case channeldb.ErrChannelNotFound:
×
3921

3922
                case nil:
3✔
3923
                        channel.Initiator = rpcInitiator(historical.IsInitiator)
3✔
3924
                        channel.CommitmentType = rpcCommitmentType(
3✔
3925
                                historical.ChanType,
3✔
3926
                        )
3✔
3927

3✔
3928
                        // Get the number of forwarding packages from the
3✔
3929
                        // historical channel.
3✔
3930
                        fwdPkgs, err := historical.LoadFwdPkgs()
3✔
3931
                        if err != nil {
3✔
3932
                                rpcsLog.Errorf("unable to load forwarding "+
×
3933
                                        "packages for channel:%s, %v",
×
3934
                                        historical.ShortChannelID, err)
×
3935
                                return nil, 0, err
×
3936
                        }
×
3937
                        channel.NumForwardingPackages = int64(len(fwdPkgs))
3✔
3938

3✔
3939
                        channel.RemoteBalance = int64(
3✔
3940
                                historical.LocalCommitment.RemoteBalance.ToSatoshis(),
3✔
3941
                        )
3✔
3942

3✔
3943
                        channel.Private = isPrivate(historical)
3✔
3944
                        channel.Memo = string(historical.Memo)
3✔
3945

3946
                // If the error is non-nil, and not due to older versions of lnd
3947
                // not persisting historical channels, return it.
3948
                default:
×
3949
                        return nil, 0, err
×
3950
                }
3951

3952
                closeTXID := pendingClose.ClosingTXID.String()
3✔
3953

3✔
3954
                switch pendingClose.CloseType {
3✔
3955

3956
                // A coop closed channel should never be in the "pending close"
3957
                // state. If a node upgraded from an older lnd version in the
3958
                // middle of their channel close confirming, it will be in this
3959
                // state. We log a warning that the channel will not be included
3960
                // in the now deprecated pending close channels field.
3961
                case channeldb.CooperativeClose:
×
3962
                        rpcsLog.Warnf("channel %v cooperatively closed and "+
×
3963
                                "in pending close state",
×
3964
                                pendingClose.ChanPoint)
×
3965

3966
                // If the channel was force closed, then we'll need to query
3967
                // the utxoNursery for additional information.
3968
                // TODO(halseth): distinguish remote and local case?
3969
                case channeldb.LocalForceClose, channeldb.RemoteForceClose:
3✔
3970
                        forceClose := &lnrpc.PendingChannelsResponse_ForceClosedChannel{
3✔
3971
                                Channel:     channel,
3✔
3972
                                ClosingTxid: closeTXID,
3✔
3973
                        }
3✔
3974

3✔
3975
                        // Fetch reports from both nursery and resolvers. At the
3✔
3976
                        // moment this is not an atomic snapshot. This is
3✔
3977
                        // planned to be resolved when the nursery is removed
3✔
3978
                        // and channel arbitrator will be the single source for
3✔
3979
                        // these kind of reports.
3✔
3980
                        err := r.nurseryPopulateForceCloseResp(
3✔
3981
                                &chanPoint, currentHeight, forceClose,
3✔
3982
                        )
3✔
3983
                        if err != nil {
3✔
3984
                                rpcsLog.Errorf("unable to populate nursery "+
×
3985
                                        "force close resp:%s, %v",
×
3986
                                        chanPoint, err)
×
3987
                                return nil, 0, err
×
3988
                        }
×
3989

3990
                        err = r.arbitratorPopulateForceCloseResp(
3✔
3991
                                &chanPoint, currentHeight, forceClose,
3✔
3992
                        )
3✔
3993
                        if err != nil {
3✔
3994
                                rpcsLog.Errorf("unable to populate arbitrator "+
×
3995
                                        "force close resp:%s, %v",
×
3996
                                        chanPoint, err)
×
3997
                                return nil, 0, err
×
3998
                        }
×
3999

4000
                        limboBalance += forceClose.LimboBalance
3✔
4001
                        result = append(result, forceClose)
3✔
4002
                }
4003
        }
4004

4005
        return result, limboBalance, nil
3✔
4006
}
4007

4008
// fetchWaitingCloseChannels queries the database for a list of channels
4009
// that have their closing transactions broadcast but not confirmed yet.
4010
// The returned result is used in the response of the PendingChannels RPC.
4011
func (r *rpcServer) fetchWaitingCloseChannels(
4012
        includeRawTx bool) (waitingCloseChannels, int64, error) {
3✔
4013

3✔
4014
        // We'll also fetch all channels that are open, but have had their
3✔
4015
        // commitment broadcasted, meaning they are waiting for the closing
3✔
4016
        // transaction to confirm.
3✔
4017
        channels, err := r.server.chanStateDB.FetchWaitingCloseChannels()
3✔
4018
        if err != nil {
3✔
4019
                rpcsLog.Errorf("unable to fetch channels waiting close: %v",
×
4020
                        err)
×
4021
                return nil, 0, err
×
4022
        }
×
4023

4024
        result := make(waitingCloseChannels, 0)
3✔
4025
        limboBalance := int64(0)
3✔
4026

3✔
4027
        // getClosingTx is a helper closure that tries to find the closing tx of
3✔
4028
        // a given waiting close channel. Notice that if the remote closes the
3✔
4029
        // channel, we may not have the closing tx.
3✔
4030
        getClosingTx := func(c *channeldb.OpenChannel) (*wire.MsgTx, error) {
6✔
4031
                var (
3✔
4032
                        tx  *wire.MsgTx
3✔
4033
                        err error
3✔
4034
                )
3✔
4035

3✔
4036
                // First, we try to locate the force closing tx. If not found,
3✔
4037
                // we will then try to find its coop closing tx.
3✔
4038
                tx, err = c.BroadcastedCommitment()
3✔
4039
                if err == nil {
6✔
4040
                        return tx, nil
3✔
4041
                }
3✔
4042

4043
                // If the error returned is not ErrNoCloseTx, something
4044
                // unexpected happened and we will return the error.
4045
                if err != channeldb.ErrNoCloseTx {
3✔
4046
                        return nil, err
×
4047
                }
×
4048

4049
                // Otherwise, we continue to locate its coop closing tx.
4050
                tx, err = c.BroadcastedCooperative()
3✔
4051
                if err == nil {
6✔
4052
                        return tx, nil
3✔
4053
                }
3✔
4054

4055
                // Return the error if it's not ErrNoCloseTx.
4056
                if err != channeldb.ErrNoCloseTx {
3✔
4057
                        return nil, err
×
4058
                }
×
4059

4060
                // Otherwise return a nil tx. This can happen if the remote
4061
                // broadcast the closing tx and we haven't recorded it yet.
4062
                return nil, nil
3✔
4063
        }
4064

4065
        for _, waitingClose := range channels {
6✔
4066
                pub := waitingClose.IdentityPub.SerializeCompressed()
3✔
4067
                chanPoint := waitingClose.FundingOutpoint
3✔
4068

3✔
4069
                var commitments lnrpc.PendingChannelsResponse_Commitments
3✔
4070

3✔
4071
                // Report local commit. May not be present when DLP is active.
3✔
4072
                if waitingClose.LocalCommitment.CommitTx != nil {
6✔
4073
                        commitments.LocalTxid =
3✔
4074
                                waitingClose.LocalCommitment.CommitTx.TxHash().
3✔
4075
                                        String()
3✔
4076

3✔
4077
                        commitments.LocalCommitFeeSat = uint64(
3✔
4078
                                waitingClose.LocalCommitment.CommitFee,
3✔
4079
                        )
3✔
4080
                }
3✔
4081

4082
                // Report remote commit. May not be present when DLP is active.
4083
                if waitingClose.RemoteCommitment.CommitTx != nil {
6✔
4084
                        commitments.RemoteTxid =
3✔
4085
                                waitingClose.RemoteCommitment.CommitTx.TxHash().
3✔
4086
                                        String()
3✔
4087

3✔
4088
                        commitments.RemoteCommitFeeSat = uint64(
3✔
4089
                                waitingClose.RemoteCommitment.CommitFee,
3✔
4090
                        )
3✔
4091
                }
3✔
4092

4093
                // Report the remote pending commit if any.
4094
                remoteCommitDiff, err := waitingClose.RemoteCommitChainTip()
3✔
4095

3✔
4096
                switch {
3✔
4097
                // Don't set hash if there is no pending remote commit.
4098
                case err == channeldb.ErrNoPendingCommit:
3✔
4099

4100
                // An unexpected error occurred.
4101
                case err != nil:
×
4102
                        return nil, 0, err
×
4103

4104
                // There is a pending remote commit. Set its hash in the
4105
                // response.
4106
                default:
×
4107
                        hash := remoteCommitDiff.Commitment.CommitTx.TxHash()
×
4108
                        commitments.RemotePendingTxid = hash.String()
×
4109
                        commitments.RemoteCommitFeeSat = uint64(
×
4110
                                remoteCommitDiff.Commitment.CommitFee,
×
4111
                        )
×
4112
                }
4113

4114
                fwdPkgs, err := waitingClose.LoadFwdPkgs()
3✔
4115
                if err != nil {
3✔
4116
                        rpcsLog.Errorf("unable to load forwarding packages "+
×
4117
                                "for channel:%s, %v",
×
4118
                                waitingClose.ShortChannelID, err)
×
4119
                        return nil, 0, err
×
4120
                }
×
4121

4122
                // Get the closing tx.
4123
                // NOTE: the closing tx could be nil here if it's the remote
4124
                // that broadcasted the closing tx.
4125
                closingTx, err := getClosingTx(waitingClose)
3✔
4126
                if err != nil {
3✔
4127
                        rpcsLog.Errorf("unable to find closing tx for "+
×
4128
                                "channel:%s, %v",
×
4129
                                waitingClose.ShortChannelID, err)
×
4130
                        return nil, 0, err
×
4131
                }
×
4132

4133
                channel := &lnrpc.PendingChannelsResponse_PendingChannel{
3✔
4134
                        RemoteNodePub:         hex.EncodeToString(pub),
3✔
4135
                        ChannelPoint:          chanPoint.String(),
3✔
4136
                        Capacity:              int64(waitingClose.Capacity),
3✔
4137
                        LocalBalance:          int64(waitingClose.LocalCommitment.LocalBalance.ToSatoshis()),
3✔
4138
                        RemoteBalance:         int64(waitingClose.LocalCommitment.RemoteBalance.ToSatoshis()),
3✔
4139
                        LocalChanReserveSat:   int64(waitingClose.LocalChanCfg.ChanReserve),
3✔
4140
                        RemoteChanReserveSat:  int64(waitingClose.RemoteChanCfg.ChanReserve),
3✔
4141
                        Initiator:             rpcInitiator(waitingClose.IsInitiator),
3✔
4142
                        CommitmentType:        rpcCommitmentType(waitingClose.ChanType),
3✔
4143
                        NumForwardingPackages: int64(len(fwdPkgs)),
3✔
4144
                        ChanStatusFlags:       waitingClose.ChanStatus().String(),
3✔
4145
                        Private:               isPrivate(waitingClose),
3✔
4146
                        Memo:                  string(waitingClose.Memo),
3✔
4147
                }
3✔
4148

3✔
4149
                var closingTxid, closingTxHex string
3✔
4150
                if closingTx != nil {
6✔
4151
                        closingTxid = closingTx.TxHash().String()
3✔
4152
                        if includeRawTx {
6✔
4153
                                var txBuf bytes.Buffer
3✔
4154
                                err = closingTx.Serialize(&txBuf)
3✔
4155
                                if err != nil {
3✔
4156
                                        return nil, 0, fmt.Errorf("failed to "+
×
4157
                                                "serialize closing transaction"+
×
4158
                                                ": %w", err)
×
4159
                                }
×
4160
                                closingTxHex = hex.EncodeToString(txBuf.Bytes())
3✔
4161
                        }
4162
                }
4163

4164
                waitingCloseResp := &lnrpc.PendingChannelsResponse_WaitingCloseChannel{
3✔
4165
                        Channel:      channel,
3✔
4166
                        LimboBalance: channel.LocalBalance,
3✔
4167
                        Commitments:  &commitments,
3✔
4168
                        ClosingTxid:  closingTxid,
3✔
4169
                        ClosingTxHex: closingTxHex,
3✔
4170
                }
3✔
4171

3✔
4172
                // A close tx has been broadcast, so all our balance will be in
3✔
4173
                // limbo until it confirms.
3✔
4174
                result = append(result, waitingCloseResp)
3✔
4175
                limboBalance += channel.LocalBalance
3✔
4176
        }
4177

4178
        return result, limboBalance, nil
3✔
4179
}
4180

4181
// PendingChannels returns a list of all the channels that are currently
4182
// considered "pending". A channel is pending if it has finished the funding
4183
// workflow and is waiting for confirmations for the funding txn, or is in the
4184
// process of closure, either initiated cooperatively or non-cooperatively.
4185
func (r *rpcServer) PendingChannels(ctx context.Context,
4186
        in *lnrpc.PendingChannelsRequest) (
4187
        *lnrpc.PendingChannelsResponse, error) {
3✔
4188

3✔
4189
        resp := &lnrpc.PendingChannelsResponse{}
3✔
4190

3✔
4191
        // First, we find all the channels that will soon be opened.
3✔
4192
        pendingOpenChannels, err := r.fetchPendingOpenChannels()
3✔
4193
        if err != nil {
3✔
4194
                return nil, err
×
4195
        }
×
4196
        resp.PendingOpenChannels = pendingOpenChannels
3✔
4197

3✔
4198
        // Second, we fetch all channels that are considered pending force closing.
3✔
4199
        // This means the channels here have their closing transactions
3✔
4200
        // confirmed but not considered fully resolved yet. For instance, they
3✔
4201
        // may have second-level HTLCs to be resolved onchain.
3✔
4202
        pendingCloseChannels, limbo, err := r.fetchPendingForceCloseChannels()
3✔
4203
        if err != nil {
3✔
4204
                return nil, err
×
4205
        }
×
4206
        resp.PendingForceClosingChannels = pendingCloseChannels
3✔
4207
        resp.TotalLimboBalance = limbo
3✔
4208

3✔
4209
        // Third, we fetch all channels that are open, but have had their
3✔
4210
        // commitment broadcasted, meaning they are waiting for the closing
3✔
4211
        // transaction to confirm.
3✔
4212
        waitingCloseChannels, limbo, err := r.fetchWaitingCloseChannels(
3✔
4213
                in.IncludeRawTx,
3✔
4214
        )
3✔
4215
        if err != nil {
3✔
4216
                return nil, err
×
4217
        }
×
4218
        resp.WaitingCloseChannels = waitingCloseChannels
3✔
4219
        resp.TotalLimboBalance += limbo
3✔
4220

3✔
4221
        err = fn.MapOptionZ(
3✔
4222
                r.server.implCfg.AuxDataParser,
3✔
4223
                func(parser AuxDataParser) error {
3✔
4224
                        return parser.InlineParseCustomData(resp)
×
4225
                },
×
4226
        )
4227
        if err != nil {
3✔
4228
                return nil, fmt.Errorf("error parsing custom data: %w", err)
×
4229
        }
×
4230

4231
        return resp, nil
3✔
4232
}
4233

4234
// arbitratorPopulateForceCloseResp populates the pending channels response
4235
// message with channel resolution information from the contract resolvers.
4236
func (r *rpcServer) arbitratorPopulateForceCloseResp(chanPoint *wire.OutPoint,
4237
        currentHeight int32,
4238
        forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {
3✔
4239

3✔
4240
        // Query for contract resolvers state.
3✔
4241
        arbitrator, err := r.server.chainArb.GetChannelArbitrator(*chanPoint)
3✔
4242
        if err != nil {
3✔
4243
                return err
×
4244
        }
×
4245
        reports := arbitrator.Report()
3✔
4246

3✔
4247
        for _, report := range reports {
6✔
4248
                switch report.Type {
3✔
4249
                // For a direct output, populate/update the top level
4250
                // response properties.
4251
                case contractcourt.ReportOutputUnencumbered:
3✔
4252
                        // Populate the maturity height fields for the direct
3✔
4253
                        // commitment output to us.
3✔
4254
                        forceClose.MaturityHeight = report.MaturityHeight
3✔
4255

3✔
4256
                        // If the transaction has been confirmed, then we can
3✔
4257
                        // compute how many blocks it has left.
3✔
4258
                        if forceClose.MaturityHeight != 0 {
6✔
4259
                                forceClose.BlocksTilMaturity =
3✔
4260
                                        int32(forceClose.MaturityHeight) -
3✔
4261
                                                currentHeight
3✔
4262
                        }
3✔
4263

4264
                // Add htlcs to the PendingHtlcs response property.
4265
                case contractcourt.ReportOutputIncomingHtlc,
4266
                        contractcourt.ReportOutputOutgoingHtlc:
3✔
4267

3✔
4268
                        // Don't report details on htlcs that are no longer in
3✔
4269
                        // limbo.
3✔
4270
                        if report.LimboBalance == 0 {
3✔
4271
                                break
×
4272
                        }
4273

4274
                        incoming := report.Type == contractcourt.ReportOutputIncomingHtlc
3✔
4275
                        htlc := &lnrpc.PendingHTLC{
3✔
4276
                                Incoming:       incoming,
3✔
4277
                                Amount:         int64(report.Amount),
3✔
4278
                                Outpoint:       report.Outpoint.String(),
3✔
4279
                                MaturityHeight: report.MaturityHeight,
3✔
4280
                                Stage:          report.Stage,
3✔
4281
                        }
3✔
4282

3✔
4283
                        if htlc.MaturityHeight != 0 {
6✔
4284
                                htlc.BlocksTilMaturity =
3✔
4285
                                        int32(htlc.MaturityHeight) - currentHeight
3✔
4286
                        }
3✔
4287

4288
                        forceClose.PendingHtlcs = append(forceClose.PendingHtlcs, htlc)
3✔
4289

4290
                case contractcourt.ReportOutputAnchor:
3✔
4291
                        // There are three resolution states for the anchor:
3✔
4292
                        // limbo, lost and recovered. Derive the current state
3✔
4293
                        // from the limbo and recovered balances.
3✔
4294
                        switch {
3✔
4295
                        case report.RecoveredBalance != 0:
3✔
4296
                                forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_RECOVERED
3✔
4297

4298
                        case report.LimboBalance != 0:
3✔
4299
                                forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_LIMBO
3✔
4300

4301
                        default:
3✔
4302
                                forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_LOST
3✔
4303
                        }
4304

4305
                default:
×
4306
                        return fmt.Errorf("unknown report output type: %v",
×
4307
                                report.Type)
×
4308
                }
4309

4310
                forceClose.LimboBalance += int64(report.LimboBalance)
3✔
4311
                forceClose.RecoveredBalance += int64(report.RecoveredBalance)
3✔
4312
        }
4313

4314
        return nil
3✔
4315
}
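// --- Illustrative sketch (not part of rpcserver.go) ------------------------
// The maturity arithmetic above, restated: with a maturity height of 800100
// and a current height of 800040, BlocksTilMaturity comes out as 60; once the
// chain passes the maturity height the value drops to zero or below. The
// helper name is hypothetical.
func exampleBlocksTilMaturity(maturityHeight uint32,
        currentHeight int32) int32 {

        return int32(maturityHeight) - currentHeight
}
// ----------------------------------------------------------------------------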
4316

4317
// nurseryPopulateForceCloseResp populates the pending channels response
4318
// message with contract resolution information from utxonursery.
4319
func (r *rpcServer) nurseryPopulateForceCloseResp(chanPoint *wire.OutPoint,
4320
        currentHeight int32,
4321
        forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {
3✔
4322

3✔
4323
        // Query for the maturity state for this force closed channel. If we
3✔
4324
        // didn't have any time-locked outputs, then the nursery may not know of
3✔
4325
        // the contract.
3✔
4326
        nurseryInfo, err := r.server.utxoNursery.NurseryReport(chanPoint)
3✔
4327
        if err == contractcourt.ErrContractNotFound {
6✔
4328
                return nil
3✔
4329
        }
3✔
4330
        if err != nil {
×
4331
                return fmt.Errorf("unable to obtain "+
×
4332
                        "nursery report for ChannelPoint(%v): %v",
×
4333
                        chanPoint, err)
×
4334
        }
×
4335

4336
        // If the nursery knows of this channel, then we can populate
4337
        // information detailing exactly how much of the funds is time locked
4338
        // and also the height at which we can ultimately sweep the funds into the
4339
        // wallet.
4340
        forceClose.LimboBalance = int64(nurseryInfo.LimboBalance)
×
4341
        forceClose.RecoveredBalance = int64(nurseryInfo.RecoveredBalance)
×
4342

×
4343
        for _, htlcReport := range nurseryInfo.Htlcs {
×
4344
                // TODO(conner) set incoming flag appropriately after handling
×
4345
                // incoming incubation
×
4346
                htlc := &lnrpc.PendingHTLC{
×
4347
                        Incoming:       false,
×
4348
                        Amount:         int64(htlcReport.Amount),
×
4349
                        Outpoint:       htlcReport.Outpoint.String(),
×
4350
                        MaturityHeight: htlcReport.MaturityHeight,
×
4351
                        Stage:          htlcReport.Stage,
×
4352
                }
×
4353

×
4354
                if htlc.MaturityHeight != 0 {
×
4355
                        htlc.BlocksTilMaturity =
×
4356
                                int32(htlc.MaturityHeight) -
×
4357
                                        currentHeight
×
4358
                }
×
4359

4360
                forceClose.PendingHtlcs = append(forceClose.PendingHtlcs,
×
4361
                        htlc)
×
4362
        }
4363

4364
        return nil
×
4365
}
4366

4367
// ClosedChannels returns a list of all the channels that have been closed.
4368
// This does not include channels that are still in the process of closing.
4369
func (r *rpcServer) ClosedChannels(ctx context.Context,
4370
        in *lnrpc.ClosedChannelsRequest) (*lnrpc.ClosedChannelsResponse,
4371
        error) {
3✔
4372

3✔
4373
        // Show all channels when no filter flags are set.
3✔
4374
        filterResults := in.Cooperative || in.LocalForce ||
3✔
4375
                in.RemoteForce || in.Breach || in.FundingCanceled ||
3✔
4376
                in.Abandoned
3✔
4377

3✔
4378
        resp := &lnrpc.ClosedChannelsResponse{}
3✔
4379

3✔
4380
        dbChannels, err := r.server.chanStateDB.FetchClosedChannels(false)
3✔
4381
        if err != nil {
3✔
4382
                return nil, err
×
4383
        }
×
4384

4385
        // In order to make the response easier to parse for clients, we'll
4386
        // sort the set of closed channels by their closing height before
4387
        // serializing the proto response.
4388
        sort.Slice(dbChannels, func(i, j int) bool {
3✔
4389
                return dbChannels[i].CloseHeight < dbChannels[j].CloseHeight
×
4390
        })
×
4391

4392
        for _, dbChannel := range dbChannels {
6✔
4393
                if dbChannel.IsPending {
3✔
4394
                        continue
×
4395
                }
4396

4397
                switch dbChannel.CloseType {
3✔
4398
                case channeldb.CooperativeClose:
3✔
4399
                        if filterResults && !in.Cooperative {
3✔
4400
                                continue
×
4401
                        }
4402
                case channeldb.LocalForceClose:
3✔
4403
                        if filterResults && !in.LocalForce {
3✔
4404
                                continue
×
4405
                        }
4406
                case channeldb.RemoteForceClose:
3✔
4407
                        if filterResults && !in.RemoteForce {
3✔
4408
                                continue
×
4409
                        }
4410
                case channeldb.BreachClose:
×
4411
                        if filterResults && !in.Breach {
×
4412
                                continue
×
4413
                        }
4414
                case channeldb.FundingCanceled:
×
4415
                        if filterResults && !in.FundingCanceled {
×
4416
                                continue
×
4417
                        }
4418
                case channeldb.Abandoned:
3✔
4419
                        if filterResults && !in.Abandoned {
3✔
4420
                                continue
×
4421
                        }
4422
                }
4423

4424
                channel, err := r.createRPCClosedChannel(dbChannel)
3✔
4425
                if err != nil {
3✔
4426
                        return nil, err
×
4427
                }
×
4428

4429
                resp.Channels = append(resp.Channels, channel)
3✔
4430
        }
4431

4432
        return resp, nil
3✔
4433
}
4434

4435
// LookupHtlcResolution retrieves a final htlc resolution from the database. If
4436
// the htlc has no final resolution yet, a NotFound grpc status code is
4437
// returned.
4438
func (r *rpcServer) LookupHtlcResolution(
4439
        _ context.Context, in *lnrpc.LookupHtlcResolutionRequest) (
4440
        *lnrpc.LookupHtlcResolutionResponse, error) {
3✔
4441

3✔
4442
        if !r.cfg.StoreFinalHtlcResolutions {
6✔
4443
                return nil, status.Error(codes.Unavailable, "cannot lookup "+
3✔
4444
                        "with flag --store-final-htlc-resolutions=false")
3✔
4445
        }
3✔
4446

4447
        chanID := lnwire.NewShortChanIDFromInt(in.ChanId)
3✔
4448

3✔
4449
        info, err := r.server.chanStateDB.LookupFinalHtlc(chanID, in.HtlcIndex)
3✔
4450
        switch {
3✔
4451
        case errors.Is(err, channeldb.ErrHtlcUnknown):
×
4452
                return nil, status.Error(codes.NotFound, err.Error())
×
4453

4454
        case err != nil:
×
4455
                return nil, err
×
4456
        }
4457

4458
        return &lnrpc.LookupHtlcResolutionResponse{
3✔
4459
                Settled:  info.Settled,
3✔
4460
                Offchain: info.Offchain,
3✔
4461
        }, nil
3✔
4462
}
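// --- Illustrative sketch (not part of rpcserver.go) ------------------------
// Client-side handling of the status codes documented above: NotFound when
// the htlc has no final resolution yet, Unavailable when the node runs with
// --store-final-htlc-resolutions=false. Assumes a connected
// lnrpc.LightningClient; the helper name is hypothetical.
func lookupHtlcSettled(ctx context.Context, client lnrpc.LightningClient,
        chanID, htlcIndex uint64) (bool, error) {

        resp, err := client.LookupHtlcResolution(
                ctx, &lnrpc.LookupHtlcResolutionRequest{
                        ChanId:    chanID,
                        HtlcIndex: htlcIndex,
                },
        )
        switch status.Code(err) {
        case codes.OK:
                return resp.Settled, nil

        case codes.NotFound:
                // No final resolution has been recorded for this htlc yet.
                return false, nil

        default:
                return false, err
        }
}
// ----------------------------------------------------------------------------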
4463

4464
// ListChannels returns a description of all the open channels that this node
4465
// is a participant in.
4466
func (r *rpcServer) ListChannels(ctx context.Context,
4467
        in *lnrpc.ListChannelsRequest) (*lnrpc.ListChannelsResponse, error) {
3✔
4468

3✔
4469
        if in.ActiveOnly && in.InactiveOnly {
6✔
4470
                return nil, fmt.Errorf("either `active_only` or " +
3✔
4471
                        "`inactive_only` can be set, but not both")
3✔
4472
        }
3✔
4473

4474
        if in.PublicOnly && in.PrivateOnly {
3✔
4475
                return nil, fmt.Errorf("either `public_only` or " +
×
4476
                        "`private_only` can be set, but not both")
×
4477
        }
×
4478

4479
        if len(in.Peer) > 0 && len(in.Peer) != 33 {
3✔
4480
                _, err := route.NewVertexFromBytes(in.Peer)
×
4481
                return nil, fmt.Errorf("invalid `peer` key: %w", err)
×
4482
        }
×
4483

4484
        resp := &lnrpc.ListChannelsResponse{}
3✔
4485

3✔
4486
        dbChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
3✔
4487
        if err != nil {
3✔
4488
                return nil, err
×
4489
        }
×
4490

4491
        rpcsLog.Debugf("[listchannels] fetched %v channels from DB",
3✔
4492
                len(dbChannels))
3✔
4493

3✔
4494
        for _, dbChannel := range dbChannels {
6✔
4495
                nodePub := dbChannel.IdentityPub
3✔
4496
                nodePubBytes := nodePub.SerializeCompressed()
3✔
4497
                chanPoint := dbChannel.FundingOutpoint
3✔
4498

3✔
4499
                // If the caller requested channels for a target node, skip any
3✔
4500
                // that don't match the provided pubkey.
3✔
4501
                if len(in.Peer) > 0 && !bytes.Equal(nodePubBytes, in.Peer) {
6✔
4502
                        continue
3✔
4503
                }
4504

4505
                var peerOnline bool
3✔
4506
                if _, err := r.server.FindPeer(nodePub); err == nil {
6✔
4507
                        peerOnline = true
3✔
4508
                }
3✔
4509

4510
                channelID := lnwire.NewChanIDFromOutPoint(chanPoint)
3✔
4511
                var linkActive bool
3✔
4512
                if link, err := r.server.htlcSwitch.GetLink(channelID); err == nil {
6✔
4513
                        // A channel is only considered active if it is known
3✔
4514
                        // by the switch *and* able to forward
3✔
4515
                        // incoming/outgoing payments.
3✔
4516
                        linkActive = link.EligibleToForward()
3✔
4517
                }
3✔
4518

4519
                // Next, we'll determine whether we should add this channel to
4520
                // our list depending on the type of channels requested to us.
4521
                isActive := peerOnline && linkActive
3✔
4522
                channel, err := createRPCOpenChannel(
3✔
4523
                        r, dbChannel, isActive, in.PeerAliasLookup,
3✔
4524
                )
3✔
4525
                if err != nil {
3✔
4526
                        return nil, err
×
4527
                }
×
4528

4529
                // We'll only skip returning this channel if we were requested
4530
                // for a specific kind and this channel doesn't satisfy it.
4531
                switch {
3✔
4532
                case in.ActiveOnly && !isActive:
3✔
4533
                        continue
3✔
4534
                case in.InactiveOnly && isActive:
×
4535
                        continue
×
4536
                case in.PublicOnly && channel.Private:
×
4537
                        continue
×
4538
                case in.PrivateOnly && !channel.Private:
3✔
4539
                        continue
3✔
4540
                }
4541

4542
                resp.Channels = append(resp.Channels, channel)
3✔
4543
        }
4544

4545
        err = fn.MapOptionZ(
3✔
4546
                r.server.implCfg.AuxDataParser,
3✔
4547
                func(parser AuxDataParser) error {
3✔
4548
                        return parser.InlineParseCustomData(resp)
×
4549
                },
×
4550
        )
4551
        if err != nil {
3✔
4552
                return nil, fmt.Errorf("error parsing custom data: %w", err)
×
4553
        }
×
4554

4555
        return resp, nil
3✔
4556
}
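// --- Illustrative sketch (not part of rpcserver.go) ------------------------
// The validation above rejects contradictory filters (active_only together
// with inactive_only, public_only together with private_only) and expects
// `peer` to be a 33-byte compressed pubkey. A minimal client-side call that
// lists only the active channels with a single peer, assuming a connected
// lnrpc.LightningClient; the helper name is hypothetical:
func listActiveChannelsWithPeer(ctx context.Context,
        client lnrpc.LightningClient, peerPubKeyHex string) (
        *lnrpc.ListChannelsResponse, error) {

        peer, err := hex.DecodeString(peerPubKeyHex)
        if err != nil {
                return nil, err
        }

        return client.ListChannels(ctx, &lnrpc.ListChannelsRequest{
                ActiveOnly:      true,
                Peer:            peer,
                PeerAliasLookup: true,
        })
}
// ----------------------------------------------------------------------------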
4557

4558
// rpcCommitmentType takes the channel type and converts it to an rpc commitment
4559
// type value.
4560
func rpcCommitmentType(chanType channeldb.ChannelType) lnrpc.CommitmentType {
3✔
4561
        // Extract the commitment type from the channel type flags. We must
3✔
4562
        // first check whether it has anchors, since in that case it would also
3✔
4563
        // be tweakless.
3✔
4564
        switch {
3✔
4565
        case chanType.HasTapscriptRoot():
×
4566
                return lnrpc.CommitmentType_SIMPLE_TAPROOT_OVERLAY
×
4567

4568
        case chanType.IsTaproot():
3✔
4569
                return lnrpc.CommitmentType_SIMPLE_TAPROOT
3✔
4570

4571
        case chanType.HasLeaseExpiration():
3✔
4572
                return lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
3✔
4573

4574
        case chanType.HasAnchors():
3✔
4575
                return lnrpc.CommitmentType_ANCHORS
3✔
4576

4577
        case chanType.IsTweakless():
3✔
4578
                return lnrpc.CommitmentType_STATIC_REMOTE_KEY
3✔
4579

4580
        default:
3✔
4581

3✔
4582
                return lnrpc.CommitmentType_LEGACY
3✔
4583
        }
4584
}
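// --- Illustrative sketch (not part of rpcserver.go) ------------------------
// The ordering above matters: an anchors channel also carries the tweakless
// bit, so checking tweakless first would misreport it as STATIC_REMOTE_KEY.
// A quick sketch, assuming the channeldb bit names below (which this file
// does not itself reference); the helper name is hypothetical:
func exampleCommitmentTypePrecedence() lnrpc.CommitmentType {
        chanType := channeldb.SingleFunderTweaklessBit |
                channeldb.AnchorOutputsBit

        // Yields lnrpc.CommitmentType_ANCHORS because the anchors check runs
        // before the tweakless check.
        return rpcCommitmentType(chanType)
}
// ----------------------------------------------------------------------------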
4585

4586
// createChannelConstraint creates a *lnrpc.ChannelConstraints using the
4587
// *channeldb.ChannelConfig.
4588
func createChannelConstraint(
4589
        chanCfg *channeldb.ChannelConfig) *lnrpc.ChannelConstraints {
3✔
4590
        return &lnrpc.ChannelConstraints{
3✔
4591
                CsvDelay:          uint32(chanCfg.CsvDelay),
3✔
4592
                ChanReserveSat:    uint64(chanCfg.ChanReserve),
3✔
4593
                DustLimitSat:      uint64(chanCfg.DustLimit),
3✔
4594
                MaxPendingAmtMsat: uint64(chanCfg.MaxPendingAmount),
3✔
4595
                MinHtlcMsat:       uint64(chanCfg.MinHTLC),
3✔
4596
                MaxAcceptedHtlcs:  uint32(chanCfg.MaxAcceptedHtlcs),
3✔
4597
        }
3✔
4598
}
3✔
4599

4600
// isPrivate evaluates the ChannelFlags of the db channel to determine if the
4601
// channel is private or not.
4602
func isPrivate(dbChannel *channeldb.OpenChannel) bool {
3✔
4603
        if dbChannel == nil {
3✔
4604
                return false
×
4605
        }
×
4606
        return dbChannel.ChannelFlags&lnwire.FFAnnounceChannel != 1
3✔
4607
}
4608

4609
// encodeCustomChanData encodes the custom channel data for the open channel.
4610
// It encodes that data as a pair of var bytes blobs.
4611
func encodeCustomChanData(lnChan *channeldb.OpenChannel) ([]byte, error) {
3✔
4612
        customOpenChanData := lnChan.CustomBlob.UnwrapOr(nil)
3✔
4613
        customLocalCommitData := lnChan.LocalCommitment.CustomBlob.UnwrapOr(nil)
3✔
4614

3✔
4615
        // Don't write any custom data if both blobs are empty.
3✔
4616
        if len(customOpenChanData) == 0 && len(customLocalCommitData) == 0 {
6✔
4617
                return nil, nil
3✔
4618
        }
3✔
4619

4620
        // We'll encode our custom channel data as two blobs. The first is a
4621
        // var bytes encoding of the open chan data, and the second is a var
4622
        // bytes encoding of the local commitment data.
4623
        var customChanDataBuf bytes.Buffer
×
4624
        err := wire.WriteVarBytes(&customChanDataBuf, 0, customOpenChanData)
×
4625
        if err != nil {
×
4626
                return nil, fmt.Errorf("unable to encode open chan "+
×
4627
                        "data: %w", err)
×
4628
        }
×
4629
        err = wire.WriteVarBytes(&customChanDataBuf, 0, customLocalCommitData)
×
4630
        if err != nil {
×
4631
                return nil, fmt.Errorf("unable to encode local commit "+
×
4632
                        "data: %w", err)
×
4633
        }
×
4634

4635
        return customChanDataBuf.Bytes(), nil
×
4636
}
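// --- Illustrative sketch (not part of rpcserver.go) ------------------------
// The inverse of encodeCustomChanData, for consumers of the blob: when
// non-empty, the payload is exactly two var-bytes fields, first the open
// channel blob and then the local commitment blob. The helper name is
// hypothetical.
func decodeCustomChanData(data []byte) (openChan, localCommit []byte,
        err error) {

        // An empty payload means neither blob was set.
        if len(data) == 0 {
                return nil, nil, nil
        }

        r := bytes.NewReader(data)
        openChan, err = wire.ReadVarBytes(r, 0, math.MaxUint32, "open chan")
        if err != nil {
                return nil, nil, err
        }

        localCommit, err = wire.ReadVarBytes(
                r, 0, math.MaxUint32, "local commit",
        )
        if err != nil {
                return nil, nil, err
        }

        return openChan, localCommit, nil
}
// ----------------------------------------------------------------------------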
4637

4638
// createRPCOpenChannel creates an *lnrpc.Channel from the *channeldb.Channel.
4639
func createRPCOpenChannel(r *rpcServer, dbChannel *channeldb.OpenChannel,
4640
        isActive, peerAliasLookup bool) (*lnrpc.Channel, error) {
3✔
4641

3✔
4642
        nodePub := dbChannel.IdentityPub
3✔
4643
        nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
3✔
4644
        chanPoint := dbChannel.FundingOutpoint
3✔
4645
        chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
3✔
4646

3✔
4647
        // As this is required for display purposes, we'll calculate
3✔
4648
        // the weight of the commitment transaction. We also add on the
3✔
4649
        // estimated weight of the witness to calculate the weight of
3✔
4650
        // the transaction if it were to be immediately unilaterally
3✔
4651
        // broadcast.
3✔
4652
        var witnessWeight int64
3✔
4653
        if dbChannel.ChanType.IsTaproot() {
6✔
4654
                witnessWeight = input.TaprootKeyPathWitnessSize
3✔
4655
        } else {
6✔
4656
                witnessWeight = input.WitnessCommitmentTxWeight
3✔
4657
        }
3✔
4658

4659
        localCommit := dbChannel.LocalCommitment
3✔
4660
        utx := btcutil.NewTx(localCommit.CommitTx)
3✔
4661
        commitBaseWeight := blockchain.GetTransactionWeight(utx)
3✔
4662
        commitWeight := commitBaseWeight + witnessWeight
3✔
4663

3✔
4664
        localBalance := localCommit.LocalBalance
3✔
4665
        remoteBalance := localCommit.RemoteBalance
3✔
4666

3✔
4667
        // As an artifact of our usage of mSAT internally, either party
3✔
4668
        // may end up in a state where they're holding a fractional
3✔
4669
        // amount of satoshis which can't be expressed within the
3✔
4670
        // actual commitment output. Since we round down when going
3✔
4671
        // from mSAT -> SAT, we may at any point be adding an
3✔
4672
        // additional SAT to miners' fees. As a result, we display a
3✔
4673
        // commitment fee that accounts for this externally.
3✔
4674
        var sumOutputs btcutil.Amount
3✔
4675
        for _, txOut := range localCommit.CommitTx.TxOut {
6✔
4676
                sumOutputs += btcutil.Amount(txOut.Value)
3✔
4677
        }
3✔
4678
        externalCommitFee := dbChannel.Capacity - sumOutputs
3✔
4679

3✔
4680
        // Extract the commitment type from the channel type flags.
3✔
4681
        commitmentType := rpcCommitmentType(dbChannel.ChanType)
3✔
4682

3✔
4683
        dbScid := dbChannel.ShortChannelID
3✔
4684

3✔
4685
        // Fetch the set of aliases for the channel.
3✔
4686
        channelAliases := r.server.aliasMgr.GetAliases(dbScid)
3✔
4687

3✔
4688
        // Fetch the peer alias. If one does not exist, errNoPeerAlias
3✔
4689
        // is returned and peerScidAlias will be an empty ShortChannelID.
3✔
4690
        peerScidAlias, _ := r.server.aliasMgr.GetPeerAlias(chanID)
3✔
4691

3✔
4692
        // Finally we'll attempt to encode the custom channel data if any
3✔
4693
        // exists.
3✔
4694
        customChanBytes, err := encodeCustomChanData(dbChannel)
3✔
4695
        if err != nil {
3✔
4696
                return nil, fmt.Errorf("unable to encode open chan data: %w",
×
4697
                        err)
×
4698
        }
×
4699

4700
        channel := &lnrpc.Channel{
3✔
4701
                Active:                isActive,
3✔
4702
                Private:               isPrivate(dbChannel),
3✔
4703
                RemotePubkey:          nodeID,
3✔
4704
                ChannelPoint:          chanPoint.String(),
3✔
4705
                ChanId:                dbScid.ToUint64(),
3✔
4706
                Capacity:              int64(dbChannel.Capacity),
3✔
4707
                LocalBalance:          int64(localBalance.ToSatoshis()),
3✔
4708
                RemoteBalance:         int64(remoteBalance.ToSatoshis()),
3✔
4709
                CommitFee:             int64(externalCommitFee),
3✔
4710
                CommitWeight:          commitWeight,
3✔
4711
                FeePerKw:              int64(localCommit.FeePerKw),
3✔
4712
                TotalSatoshisSent:     int64(dbChannel.TotalMSatSent.ToSatoshis()),
3✔
4713
                TotalSatoshisReceived: int64(dbChannel.TotalMSatReceived.ToSatoshis()),
3✔
4714
                NumUpdates:            localCommit.CommitHeight,
3✔
4715
                PendingHtlcs:          make([]*lnrpc.HTLC, len(localCommit.Htlcs)),
3✔
4716
                Initiator:             dbChannel.IsInitiator,
3✔
4717
                ChanStatusFlags:       dbChannel.ChanStatus().String(),
3✔
4718
                StaticRemoteKey:       commitmentType == lnrpc.CommitmentType_STATIC_REMOTE_KEY,
3✔
4719
                CommitmentType:        commitmentType,
3✔
4720
                ThawHeight:            dbChannel.ThawHeight,
3✔
4721
                LocalConstraints: createChannelConstraint(
3✔
4722
                        &dbChannel.LocalChanCfg,
3✔
4723
                ),
3✔
4724
                RemoteConstraints: createChannelConstraint(
3✔
4725
                        &dbChannel.RemoteChanCfg,
3✔
4726
                ),
3✔
4727
                AliasScids:            make([]uint64, 0, len(channelAliases)),
3✔
4728
                PeerScidAlias:         peerScidAlias.ToUint64(),
3✔
4729
                ZeroConf:              dbChannel.IsZeroConf(),
3✔
4730
                ZeroConfConfirmedScid: dbChannel.ZeroConfRealScid().ToUint64(),
3✔
4731
                Memo:                  string(dbChannel.Memo),
3✔
4732
                CustomChannelData:     customChanBytes,
3✔
4733
                // TODO: remove the following deprecated fields
3✔
4734
                CsvDelay:             uint32(dbChannel.LocalChanCfg.CsvDelay),
3✔
4735
                LocalChanReserveSat:  int64(dbChannel.LocalChanCfg.ChanReserve),
3✔
4736
                RemoteChanReserveSat: int64(dbChannel.RemoteChanCfg.ChanReserve),
3✔
4737
        }
3✔
4738

3✔
4739
        // Look up our channel peer's node alias if the caller requests it.
3✔
4740
        if peerAliasLookup {
6✔
4741
                peerAlias, err := r.server.graphDB.LookupAlias(nodePub)
3✔
4742
                if err != nil {
3✔
4743
                        peerAlias = fmt.Sprintf("unable to lookup "+
×
4744
                                "peer alias: %v", err)
×
4745
                }
×
4746
                channel.PeerAlias = peerAlias
3✔
4747
        }
4748

4749
        // Populate the set of aliases.
4750
        for _, chanAlias := range channelAliases {
6✔
4751
                channel.AliasScids = append(
3✔
4752
                        channel.AliasScids, chanAlias.ToUint64(),
3✔
4753
                )
3✔
4754
        }
3✔
4755

4756
        for i, htlc := range localCommit.Htlcs {
6✔
4757
                var rHash [32]byte
3✔
4758
                copy(rHash[:], htlc.RHash[:])
3✔
4759

3✔
4760
                circuitMap := r.server.htlcSwitch.CircuitLookup()
3✔
4761

3✔
4762
                var forwardingChannel, forwardingHtlcIndex uint64
3✔
4763
                switch {
3✔
4764
                case htlc.Incoming:
3✔
4765
                        circuit := circuitMap.LookupCircuit(
3✔
4766
                                htlcswitch.CircuitKey{
3✔
4767
                                        ChanID: dbChannel.ShortChannelID,
3✔
4768
                                        HtlcID: htlc.HtlcIndex,
3✔
4769
                                },
3✔
4770
                        )
3✔
4771
                        if circuit != nil && circuit.Outgoing != nil {
6✔
4772
                                forwardingChannel = circuit.Outgoing.ChanID.
3✔
4773
                                        ToUint64()
3✔
4774

3✔
4775
                                forwardingHtlcIndex = circuit.Outgoing.HtlcID
3✔
4776
                        }
3✔
4777

4778
                case !htlc.Incoming:
3✔
4779
                        circuit := circuitMap.LookupOpenCircuit(
3✔
4780
                                htlcswitch.CircuitKey{
3✔
4781
                                        ChanID: dbChannel.ShortChannelID,
3✔
4782
                                        HtlcID: htlc.HtlcIndex,
3✔
4783
                                },
3✔
4784
                        )
3✔
4785

3✔
4786
                        // If the incoming channel id is the special hop.Source
3✔
4787
                        // value, the htlc index is a local payment identifier.
3✔
4788
                        // In this case, report nothing.
3✔
4789
                        if circuit != nil &&
3✔
4790
                                circuit.Incoming.ChanID != hop.Source {
6✔
4791

3✔
4792
                                forwardingChannel = circuit.Incoming.ChanID.
3✔
4793
                                        ToUint64()
3✔
4794

3✔
4795
                                forwardingHtlcIndex = circuit.Incoming.HtlcID
3✔
4796
                        }
3✔
4797
                }
4798

4799
                channel.PendingHtlcs[i] = &lnrpc.HTLC{
3✔
4800
                        Incoming:            htlc.Incoming,
3✔
4801
                        Amount:              int64(htlc.Amt.ToSatoshis()),
3✔
4802
                        HashLock:            rHash[:],
3✔
4803
                        ExpirationHeight:    htlc.RefundTimeout,
3✔
4804
                        HtlcIndex:           htlc.HtlcIndex,
3✔
4805
                        ForwardingChannel:   forwardingChannel,
3✔
4806
                        ForwardingHtlcIndex: forwardingHtlcIndex,
3✔
4807
                }
3✔
4808

3✔
4809
                // Add the Pending Htlc Amount to UnsettledBalance field.
3✔
4810
                channel.UnsettledBalance += channel.PendingHtlcs[i].Amount
3✔
4811
        }
4812

4813
        // If we initiated opening the channel, the zero height remote balance
4814
        // is the push amount. Otherwise, our starting balance is the push
4815
        // amount. If there is no push amount, these values will simply be zero.
4816
        if dbChannel.IsInitiator {
6✔
4817
                amt := dbChannel.InitialRemoteBalance.ToSatoshis()
3✔
4818
                channel.PushAmountSat = uint64(amt)
3✔
4819
        } else {
6✔
4820
                amt := dbChannel.InitialLocalBalance.ToSatoshis()
3✔
4821
                channel.PushAmountSat = uint64(amt)
3✔
4822
        }
3✔
4823

4824
        if len(dbChannel.LocalShutdownScript) > 0 {
6✔
4825
                _, addresses, _, err := txscript.ExtractPkScriptAddrs(
3✔
4826
                        dbChannel.LocalShutdownScript, r.cfg.ActiveNetParams.Params,
3✔
4827
                )
3✔
4828
                if err != nil {
3✔
4829
                        return nil, err
×
4830
                }
×
4831

4832
                // We only expect one upfront shutdown address for a channel. If
4833
                // LocalShutdownScript is non-zero, there should be one payout
4834
                // address set.
4835
                if len(addresses) != 1 {
3✔
4836
                        return nil, fmt.Errorf("expected one upfront shutdown "+
×
4837
                                "address, got: %v", len(addresses))
×
4838
                }
×
4839

4840
                channel.CloseAddress = addresses[0].String()
3✔
4841
        }
4842

4843
        // If the server hasn't fully started yet, it's possible that the
4844
        // channel event store hasn't either, so it won't be able to consume any
4845
        // requests until then. To prevent blocking, we'll just omit the uptime
4846
        // related fields for now.
4847
        if !r.server.Started() {
3✔
4848
                return channel, nil
×
4849
        }
×
4850

4851
        peer, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
3✔
4852
        if err != nil {
3✔
4853
                return nil, err
×
4854
        }
×
4855

4856
        // Query the event store for additional information about the channel.
4857
        // Do not fail if it is not available, because there is a potential
4858
        // race between a channel being added to our node and the event store
4859
        // being notified of it.
4860
        outpoint := dbChannel.FundingOutpoint
3✔
4861
        info, err := r.server.chanEventStore.GetChanInfo(outpoint, peer)
3✔
4862
        switch err {
3✔
4863
        // If the store does not know about the channel, we just log it.
4864
        case chanfitness.ErrChannelNotFound:
3✔
4865
                rpcsLog.Infof("channel: %v not found by channel event store",
3✔
4866
                        outpoint)
3✔
4867

4868
        // If we got our channel info, we further populate the channel.
4869
        case nil:
3✔
4870
                channel.Uptime = int64(info.Uptime.Seconds())
3✔
4871
                channel.Lifetime = int64(info.Lifetime.Seconds())
3✔
4872

4873
        // If we get an unexpected error, we return it.
4874
        default:
×
4875
                return nil, err
×
4876
        }
4877

4878
        return channel, nil
3✔
4879
}
4880
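
// The CommitFee reported above is derived externally as capacity minus the
// sum of the commitment outputs, so any satoshi lost to msat->sat rounding
// is folded into the displayed fee. The stand-alone helper below is an
// illustrative sketch (not part of lnd); the name externalCommitFee is an
// assumption made for the example.
func externalCommitFee(capacity btcutil.Amount,
	commitTx *wire.MsgTx) btcutil.Amount {

	var sumOutputs btcutil.Amount
	for _, txOut := range commitTx.TxOut {
		sumOutputs += btcutil.Amount(txOut.Value)
	}

	// Whatever is not accounted for by the outputs is displayed as fee.
	return capacity - sumOutputs
}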

// createRPCClosedChannel creates an *lnrpc.ChannelCloseSummary from a
// *channeldb.ChannelCloseSummary.
func (r *rpcServer) createRPCClosedChannel(
	dbChannel *channeldb.ChannelCloseSummary) (*lnrpc.ChannelCloseSummary, error) {

	nodePub := dbChannel.RemotePub
	nodeID := hex.EncodeToString(nodePub.SerializeCompressed())

	var (
		closeType      lnrpc.ChannelCloseSummary_ClosureType
		openInit       lnrpc.Initiator
		closeInitiator lnrpc.Initiator
		err            error
	)

	// Look up local and remote cooperative initiators. If these values
	// are not known they will just return unknown.
	openInit, closeInitiator, err = r.getInitiators(
		&dbChannel.ChanPoint,
	)
	if err != nil {
		return nil, err
	}

	// Convert the close type to rpc type.
	switch dbChannel.CloseType {
	case channeldb.CooperativeClose:
		closeType = lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE
	case channeldb.LocalForceClose:
		closeType = lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE
	case channeldb.RemoteForceClose:
		closeType = lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE
	case channeldb.BreachClose:
		closeType = lnrpc.ChannelCloseSummary_BREACH_CLOSE
	case channeldb.FundingCanceled:
		closeType = lnrpc.ChannelCloseSummary_FUNDING_CANCELED
	case channeldb.Abandoned:
		closeType = lnrpc.ChannelCloseSummary_ABANDONED
	}

	dbScid := dbChannel.ShortChanID

	// Fetch the set of aliases for this channel.
	channelAliases := r.server.aliasMgr.GetAliases(dbScid)

	channel := &lnrpc.ChannelCloseSummary{
		Capacity:          int64(dbChannel.Capacity),
		RemotePubkey:      nodeID,
		CloseHeight:       dbChannel.CloseHeight,
		CloseType:         closeType,
		ChannelPoint:      dbChannel.ChanPoint.String(),
		ChanId:            dbChannel.ShortChanID.ToUint64(),
		SettledBalance:    int64(dbChannel.SettledBalance),
		TimeLockedBalance: int64(dbChannel.TimeLockedBalance),
		ChainHash:         dbChannel.ChainHash.String(),
		ClosingTxHash:     dbChannel.ClosingTXID.String(),
		OpenInitiator:     openInit,
		CloseInitiator:    closeInitiator,
		AliasScids:        make([]uint64, 0, len(channelAliases)),
	}

	// Populate the set of aliases.
	for _, chanAlias := range channelAliases {
		channel.AliasScids = append(
			channel.AliasScids, chanAlias.ToUint64(),
		)
	}

	// Populate any historical data that the summary needs.
	histChan, err := r.server.chanStateDB.FetchHistoricalChannel(
		&dbChannel.ChanPoint,
	)
	switch err {
	// The channel was closed in a pre-historic version of lnd. Ignore the
	// error.
	case channeldb.ErrNoHistoricalBucket:
	case channeldb.ErrChannelNotFound:

	case nil:
		if histChan.IsZeroConf() && histChan.ZeroConfConfirmed() {
			// If the channel was zero-conf, it may have confirmed.
			// Populate the confirmed SCID if so.
			confirmedScid := histChan.ZeroConfRealScid().ToUint64()
			channel.ZeroConfConfirmedScid = confirmedScid
		}

	// Non-nil error not due to older versions of lnd.
	default:
		return nil, err
	}

	reports, err := r.server.miscDB.FetchChannelReports(
		*r.cfg.ActiveNetParams.GenesisHash, &dbChannel.ChanPoint,
	)
	switch err {
	// If the channel does not have its resolver outcomes stored,
	// ignore it.
	case channeldb.ErrNoChainHashBucket:
		fallthrough
	case channeldb.ErrNoChannelSummaries:
		return channel, nil

	// If there is no error, fall through the switch to process reports.
	case nil:

	// If another error occurred, return it.
	default:
		return nil, err
	}

	for _, report := range reports {
		rpcResolution, err := rpcChannelResolution(report)
		if err != nil {
			return nil, err
		}

		channel.Resolutions = append(channel.Resolutions, rpcResolution)
	}

	return channel, nil
}

func rpcChannelResolution(report *channeldb.ResolverReport) (*lnrpc.Resolution,
	error) {

	res := &lnrpc.Resolution{
		AmountSat: uint64(report.Amount),
		Outpoint:  lnrpc.MarshalOutPoint(&report.OutPoint),
	}

	if report.SpendTxID != nil {
		res.SweepTxid = report.SpendTxID.String()
	}

	switch report.ResolverType {
	case channeldb.ResolverTypeAnchor:
		res.ResolutionType = lnrpc.ResolutionType_ANCHOR

	case channeldb.ResolverTypeIncomingHtlc:
		res.ResolutionType = lnrpc.ResolutionType_INCOMING_HTLC

	case channeldb.ResolverTypeOutgoingHtlc:
		res.ResolutionType = lnrpc.ResolutionType_OUTGOING_HTLC

	case channeldb.ResolverTypeCommit:
		res.ResolutionType = lnrpc.ResolutionType_COMMIT

	default:
		return nil, fmt.Errorf("unknown resolver type: %v",
			report.ResolverType)
	}

	switch report.ResolverOutcome {
	case channeldb.ResolverOutcomeClaimed:
		res.Outcome = lnrpc.ResolutionOutcome_CLAIMED

	case channeldb.ResolverOutcomeUnclaimed:
		res.Outcome = lnrpc.ResolutionOutcome_UNCLAIMED

	case channeldb.ResolverOutcomeAbandoned:
		res.Outcome = lnrpc.ResolutionOutcome_ABANDONED

	case channeldb.ResolverOutcomeFirstStage:
		res.Outcome = lnrpc.ResolutionOutcome_FIRST_STAGE

	case channeldb.ResolverOutcomeTimeout:
		res.Outcome = lnrpc.ResolutionOutcome_TIMEOUT

	default:
		return nil, fmt.Errorf("unknown outcome: %v",
			report.ResolverOutcome)
	}

	return res, nil
}

// getInitiators returns an initiator enum that provides information about the
// party that initiated the channel's open and close. This information is
// obtained from the historical channel bucket, so unknown values are returned
// when the channel is not present (which indicates that it was closed before
// we started writing channels to the historical close bucket).
func (r *rpcServer) getInitiators(chanPoint *wire.OutPoint) (
	lnrpc.Initiator,
	lnrpc.Initiator, error) {

	var (
		openInitiator  = lnrpc.Initiator_INITIATOR_UNKNOWN
		closeInitiator = lnrpc.Initiator_INITIATOR_UNKNOWN
	)

	// To get the close initiator for cooperative closes, we need
	// to get the channel status from the historical channel bucket.
	histChan, err := r.server.chanStateDB.FetchHistoricalChannel(chanPoint)
	switch {
	// The node has upgraded from a version where we did not store
	// historical channels, and has not closed a channel since. Do
	// not return an error, initiator values are unknown.
	case err == channeldb.ErrNoHistoricalBucket:
		return openInitiator, closeInitiator, nil

	// The channel was closed before we started storing historical
	// channels. Do not return an error, initiator values are unknown.
	case err == channeldb.ErrChannelNotFound:
		return openInitiator, closeInitiator, nil

	case err != nil:
		return 0, 0, err
	}

	// If we successfully looked up the channel, determine the initiators
	// based on the channel's status.
	if histChan.IsInitiator {
		openInitiator = lnrpc.Initiator_INITIATOR_LOCAL
	} else {
		openInitiator = lnrpc.Initiator_INITIATOR_REMOTE
	}

	localInit := histChan.HasChanStatus(
		channeldb.ChanStatusLocalCloseInitiator,
	)

	remoteInit := histChan.HasChanStatus(
		channeldb.ChanStatusRemoteCloseInitiator,
	)

	switch {
	// There is a possible case where closes were attempted by both parties.
	// We return the initiator as both in this case to provide full
	// information about the close.
	case localInit && remoteInit:
		closeInitiator = lnrpc.Initiator_INITIATOR_BOTH

	case localInit:
		closeInitiator = lnrpc.Initiator_INITIATOR_LOCAL

	case remoteInit:
		closeInitiator = lnrpc.Initiator_INITIATOR_REMOTE
	}

	return openInitiator, closeInitiator, nil
}
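
// The close-initiator selection above reduces to a small truth table over
// the two channel status bits. The helper below restates it as a pure
// function for illustration; it is not part of lnd and the name
// closeInitiatorFromFlags is an assumption made for the example.
func closeInitiatorFromFlags(localInit, remoteInit bool) lnrpc.Initiator {
	switch {
	// Both parties attempted a close, so report both as initiators.
	case localInit && remoteInit:
		return lnrpc.Initiator_INITIATOR_BOTH

	case localInit:
		return lnrpc.Initiator_INITIATOR_LOCAL

	case remoteInit:
		return lnrpc.Initiator_INITIATOR_REMOTE

	// Neither status bit is set, so the initiator is unknown.
	default:
		return lnrpc.Initiator_INITIATOR_UNKNOWN
	}
}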

// SubscribeChannelEvents returns a uni-directional stream (server -> client)
// for notifying the client of newly active, inactive or closed channels.
func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription,
	updateStream lnrpc.Lightning_SubscribeChannelEventsServer) error {

	channelEventSub, err := r.server.channelNotifier.SubscribeChannelEvents()
	if err != nil {
		return err
	}

	// Ensure that the resources for the client are cleaned up once either
	// the server, or client exits.
	defer channelEventSub.Cancel()

	for {
		select {
		// A new update has been sent by the channel router, we'll
		// marshal it into the form expected by the gRPC client, then
		// send it off to the client(s).
		case e := <-channelEventSub.Updates():
			var update *lnrpc.ChannelEventUpdate
			switch event := e.(type) {
			case channelnotifier.PendingOpenChannelEvent:
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_PendingOpenChannel{
						PendingOpenChannel: &lnrpc.PendingUpdate{
							Txid:        event.ChannelPoint.Hash[:],
							OutputIndex: event.ChannelPoint.Index,
						},
					},
				}
			case channelnotifier.OpenChannelEvent:
				channel, err := createRPCOpenChannel(
					r, event.Channel, true, false,
				)
				if err != nil {
					return err
				}

				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_OPEN_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_OpenChannel{
						OpenChannel: channel,
					},
				}

			case channelnotifier.ClosedChannelEvent:
				closedChannel, err := r.createRPCClosedChannel(
					event.CloseSummary,
				)
				if err != nil {
					return err
				}

				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_ClosedChannel{
						ClosedChannel: closedChannel,
					},
				}

			case channelnotifier.ActiveChannelEvent:
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_ActiveChannel{
						ActiveChannel: &lnrpc.ChannelPoint{
							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
								FundingTxidBytes: event.ChannelPoint.Hash[:],
							},
							OutputIndex: event.ChannelPoint.Index,
						},
					},
				}

			case channelnotifier.InactiveChannelEvent:
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_InactiveChannel{
						InactiveChannel: &lnrpc.ChannelPoint{
							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
								FundingTxidBytes: event.ChannelPoint.Hash[:],
							},
							OutputIndex: event.ChannelPoint.Index,
						},
					},
				}

			// Completely ignore ActiveLinkEvent and
			// InactiveLinkEvent as these are explicitly not exposed
			// to the RPC.
			case channelnotifier.ActiveLinkEvent,
				channelnotifier.InactiveLinkEvent:

				continue

			case channelnotifier.FullyResolvedChannelEvent:
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_FullyResolvedChannel{
						FullyResolvedChannel: &lnrpc.ChannelPoint{
							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
								FundingTxidBytes: event.ChannelPoint.Hash[:],
							},
							OutputIndex: event.ChannelPoint.Index,
						},
					},
				}

			default:
				return fmt.Errorf("unexpected channel event update: %v", event)
			}

			if err := updateStream.Send(update); err != nil {
				return err
			}

		// The response stream's context for whatever reason has been
		// closed. If the context was closed by an exceeded deadline,
		// we will return an error.
		case <-updateStream.Context().Done():
			if errors.Is(updateStream.Context().Err(), context.Canceled) {
				return nil
			}
			return updateStream.Context().Err()

		case <-r.quit:
			return nil
		}
	}
}
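
// From a client's perspective, the stream above is consumed by calling
// SubscribeChannelEvents on the generated lnrpc.LightningClient and reading
// updates until the stream or context ends. The function below is an
// illustrative client-side sketch only (not part of lnd); the name
// watchChannelEvents and the logging choice are assumptions for the example.
func watchChannelEvents(ctx context.Context,
	client lnrpc.LightningClient) error {

	stream, err := client.SubscribeChannelEvents(
		ctx, &lnrpc.ChannelEventSubscription{},
	)
	if err != nil {
		return err
	}

	for {
		update, err := stream.Recv()
		if err != nil {
			// io.EOF or a context error terminates the
			// subscription.
			return err
		}

		// Each update carries exactly one of the event types produced
		// by the switch above (pending open, open, closed, active,
		// inactive, fully resolved).
		rpcsLog.Infof("channel event: %v", update.Type)
	}
}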

// paymentStream enables different types of payment streams, such as
// lnrpc.Lightning_SendPaymentServer and lnrpc.Lightning_SendToRouteServer, to
// execute sendPayment. We use this struct as a sort of bridge to enable code
// re-use between SendPayment and SendToRoute.
type paymentStream struct {
	recv func() (*rpcPaymentRequest, error)
	send func(*lnrpc.SendResponse) error
}

// rpcPaymentRequest wraps lnrpc.SendRequest so that routes from
// lnrpc.SendToRouteRequest can be passed to sendPayment.
type rpcPaymentRequest struct {
	*lnrpc.SendRequest
	route *route.Route
}

// SendPayment dispatches a bi-directional streaming RPC for sending payments
// through the Lightning Network. A single RPC invocation creates a persistent
// bi-directional stream allowing clients to rapidly send payments through the
// Lightning Network with a single persistent connection.
func (r *rpcServer) SendPayment(stream lnrpc.Lightning_SendPaymentServer) error {
	var lock sync.Mutex

	return r.sendPayment(&paymentStream{
		recv: func() (*rpcPaymentRequest, error) {
			req, err := stream.Recv()
			if err != nil {
				return nil, err
			}

			return &rpcPaymentRequest{
				SendRequest: req,
			}, nil
		},
		send: func(r *lnrpc.SendResponse) error {
			// Calling stream.Send concurrently is not safe.
			lock.Lock()
			defer lock.Unlock()
			return stream.Send(r)
		},
	})
}

// SendToRoute dispatches a bi-directional streaming RPC for sending payments
// through the Lightning Network via predefined routes passed in. A single RPC
// invocation creates a persistent bi-directional stream allowing clients to
// rapidly send payments through the Lightning Network with a single persistent
// connection.
func (r *rpcServer) SendToRoute(stream lnrpc.Lightning_SendToRouteServer) error {
	var lock sync.Mutex

	return r.sendPayment(&paymentStream{
		recv: func() (*rpcPaymentRequest, error) {
			req, err := stream.Recv()
			if err != nil {
				return nil, err
			}

			return r.unmarshallSendToRouteRequest(req)
		},
		send: func(r *lnrpc.SendResponse) error {
			// Calling stream.Send concurrently is not safe.
			lock.Lock()
			defer lock.Unlock()
			return stream.Send(r)
		},
	})
}

// unmarshallSendToRouteRequest unmarshalls an RPC SendToRoute request.
func (r *rpcServer) unmarshallSendToRouteRequest(
	req *lnrpc.SendToRouteRequest) (*rpcPaymentRequest, error) {

	if req.Route == nil {
		return nil, fmt.Errorf("unable to send, no route provided")
	}

	route, err := r.routerBackend.UnmarshallRoute(req.Route)
	if err != nil {
		return nil, err
	}

	return &rpcPaymentRequest{
		SendRequest: &lnrpc.SendRequest{
			PaymentHash:       req.PaymentHash,
			PaymentHashString: req.PaymentHashString,
		},
		route: route,
	}, nil
}
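
// Because paymentStream is just a pair of closures, a fixed batch of
// requests can be replayed through sendPayment without a live gRPC stream,
// which is handy for tests or offline tooling. The constructor below is an
// illustrative sketch (not part of lnd); newStaticPaymentStream is a
// hypothetical name.
func newStaticPaymentStream(reqs []*rpcPaymentRequest,
	send func(*lnrpc.SendResponse) error) *paymentStream {

	i := 0
	return &paymentStream{
		recv: func() (*rpcPaymentRequest, error) {
			// Mirror a closed gRPC stream once the requests are
			// exhausted.
			if i >= len(reqs) {
				return nil, io.EOF
			}

			req := reqs[i]
			i++

			return req, nil
		},
		send: send,
	}
}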

// rpcPaymentIntent is a small wrapper struct around the set of values we can
// receive from a client over RPC if they wish to send a payment. We'll either
// extract these fields from a payment request (which may include routing
// hints), or we'll get a fully populated route from the user that we'll pass
// directly to the channel router for dispatching.
type rpcPaymentIntent struct {
	msat               lnwire.MilliSatoshi
	feeLimit           lnwire.MilliSatoshi
	cltvLimit          uint32
	dest               route.Vertex
	rHash              [32]byte
	cltvDelta          uint16
	routeHints         [][]zpay32.HopHint
	outgoingChannelIDs []uint64
	lastHop            *route.Vertex
	destFeatures       *lnwire.FeatureVector
	paymentAddr        fn.Option[[32]byte]
	payReq             []byte
	metadata           []byte
	blindedPathSet     *routing.BlindedPaymentPathSet

	destCustomRecords record.CustomSet

	route *route.Route
}
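
// A manually specified payment only needs a handful of the fields above to
// be populated. The sketch below shows such a minimal intent; it is purely
// illustrative (not part of lnd) and all concrete values are placeholders.
func exampleManualIntent(dest route.Vertex, rHash [32]byte) rpcPaymentIntent {
	// 100k satoshis expressed in msat, with a 1% fee budget; both values
	// are placeholders for the example.
	amt := lnwire.MilliSatoshi(100_000_000)

	return rpcPaymentIntent{
		msat:      amt,
		feeLimit:  amt / 100,
		cltvLimit: 144,
		cltvDelta: 80,
		dest:      dest,
		rHash:     rHash,
	}
}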

// extractPaymentIntent attempts to parse the complete details required to
// dispatch a payment from the information presented by an RPC client. There
// are three ways a client can specify their payment details: a payment
// request, via manual details, or via a complete route.
func (r *rpcServer) extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error) {
	payIntent := rpcPaymentIntent{}

	// If a route was specified, then we can use that directly.
	if rpcPayReq.route != nil {
		// If the user is using the REST interface, then they'll be
		// passing the payment hash as a hex encoded string.
		if rpcPayReq.PaymentHashString != "" {
			paymentHash, err := hex.DecodeString(
				rpcPayReq.PaymentHashString,
			)
			if err != nil {
				return payIntent, err
			}

			copy(payIntent.rHash[:], paymentHash)
		} else {
			copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
		}

		payIntent.route = rpcPayReq.route
		return payIntent, nil
	}

	// If there are no routes specified, pass along an outgoing channel
	// restriction if specified. The main server rpc does not support
	// multiple channel restrictions.
	if rpcPayReq.OutgoingChanId != 0 {
		payIntent.outgoingChannelIDs = []uint64{
			rpcPayReq.OutgoingChanId,
		}
	}

	// Pass along a last hop restriction if specified.
	if len(rpcPayReq.LastHopPubkey) > 0 {
		lastHop, err := route.NewVertexFromBytes(
			rpcPayReq.LastHopPubkey,
		)
		if err != nil {
			return payIntent, err
		}
		payIntent.lastHop = &lastHop
	}

	// Take the CLTV limit from the request if set, otherwise use the max.
	cltvLimit, err := routerrpc.ValidateCLTVLimit(
		rpcPayReq.CltvLimit, r.cfg.MaxOutgoingCltvExpiry,
	)
	if err != nil {
		return payIntent, err
	}
	payIntent.cltvLimit = cltvLimit

	customRecords := record.CustomSet(rpcPayReq.DestCustomRecords)
	if err := customRecords.Validate(); err != nil {
		return payIntent, err
	}
	payIntent.destCustomRecords = customRecords

	validateDest := func(dest route.Vertex) error {
		if rpcPayReq.AllowSelfPayment {
			return nil
		}

		if dest == r.selfNode {
			return errors.New("self-payments not allowed")
		}

		return nil
	}

	// If the payment request field isn't blank, then the details of the
	// invoice are encoded entirely within the encoded payReq. So we'll
	// attempt to decode it, populating the payment accordingly.
	if rpcPayReq.PaymentRequest != "" {
		payReq, err := zpay32.Decode(
			rpcPayReq.PaymentRequest, r.cfg.ActiveNetParams.Params,
			zpay32.WithErrorOnUnknownFeatureBit(),
		)
		if err != nil {
			return payIntent, err
		}

		// Next, we'll ensure that this payreq hasn't already expired.
		err = routerrpc.ValidatePayReqExpiry(payReq)
		if err != nil {
			return payIntent, err
		}

		// If the amount was not included in the invoice, then we let
		// the payer specify the amount of satoshis they wish to send.
		// We override the amount to pay with the amount provided from
		// the payment request.
		if payReq.MilliSat == nil {
			amt, err := lnrpc.UnmarshallAmt(
				rpcPayReq.Amt, rpcPayReq.AmtMsat,
			)
			if err != nil {
				return payIntent, err
			}
			if amt == 0 {
				return payIntent, errors.New("amount must be " +
					"specified when paying a zero amount " +
					"invoice")
			}

			payIntent.msat = amt
		} else {
			payIntent.msat = *payReq.MilliSat
		}

		// Calculate the fee limit that should be used for this payment.
		payIntent.feeLimit = lnrpc.CalculateFeeLimit(
			rpcPayReq.FeeLimit, payIntent.msat,
		)

		copy(payIntent.rHash[:], payReq.PaymentHash[:])
		destKey := payReq.Destination.SerializeCompressed()
		copy(payIntent.dest[:], destKey)
		payIntent.cltvDelta = uint16(payReq.MinFinalCLTVExpiry())
		payIntent.routeHints = payReq.RouteHints
		payIntent.payReq = []byte(rpcPayReq.PaymentRequest)
		payIntent.destFeatures = payReq.Features
		payIntent.paymentAddr = payReq.PaymentAddr
		payIntent.metadata = payReq.Metadata

		if len(payReq.BlindedPaymentPaths) > 0 {
			pathSet, err := routerrpc.BuildBlindedPathSet(
				payReq.BlindedPaymentPaths,
			)
			if err != nil {
				return payIntent, err
			}
			payIntent.blindedPathSet = pathSet

			// Replace the destination node with the target public
			// key of the blinded path set.
			copy(
				payIntent.dest[:],
				pathSet.TargetPubKey().SerializeCompressed(),
			)

			pathFeatures := pathSet.Features()
			if !pathFeatures.IsEmpty() {
				payIntent.destFeatures = pathFeatures.Clone()
			}
		}

		if err := validateDest(payIntent.dest); err != nil {
			return payIntent, err
		}

		// Do bounds checking with the block padding.
		err = routing.ValidateCLTVLimit(
			payIntent.cltvLimit, payIntent.cltvDelta, true,
		)
		if err != nil {
			return payIntent, err
		}

		return payIntent, nil
	}

	// At this point, a destination MUST be specified, so we'll convert it
	// into the proper representation now. The destination will either be
	// encoded as raw bytes, or via a hex string.
	var pubBytes []byte
	if len(rpcPayReq.Dest) != 0 {
		pubBytes = rpcPayReq.Dest
	} else {
		var err error
		pubBytes, err = hex.DecodeString(rpcPayReq.DestString)
		if err != nil {
			return payIntent, err
		}
	}
	if len(pubBytes) != 33 {
		return payIntent, errors.New("invalid key length")
	}
	copy(payIntent.dest[:], pubBytes)

	if err := validateDest(payIntent.dest); err != nil {
		return payIntent, err
	}

	// Payment address may not be needed by legacy invoices.
	if len(rpcPayReq.PaymentAddr) != 0 && len(rpcPayReq.PaymentAddr) != 32 {
		return payIntent, errors.New("invalid payment address length")
	}

	// Set the payment address if it was explicitly defined with the
	// rpcPaymentRequest.
	// Note that the payment address for the payIntent should be nil if none
	// was provided with the rpcPaymentRequest.
	if len(rpcPayReq.PaymentAddr) != 0 {
		var addr [32]byte
		copy(addr[:], rpcPayReq.PaymentAddr)
		payIntent.paymentAddr = fn.Some(addr)
	}

	// Otherwise, if the payment request field was not specified
	// (and a custom route wasn't specified), construct the payment
	// from the other fields.
	payIntent.msat, err = lnrpc.UnmarshallAmt(
		rpcPayReq.Amt, rpcPayReq.AmtMsat,
	)
	if err != nil {
		return payIntent, err
	}

	// Calculate the fee limit that should be used for this payment.
	payIntent.feeLimit = lnrpc.CalculateFeeLimit(
		rpcPayReq.FeeLimit, payIntent.msat,
	)

	if rpcPayReq.FinalCltvDelta != 0 {
		payIntent.cltvDelta = uint16(rpcPayReq.FinalCltvDelta)
	} else {
		// If no final cltv delta is given, assume the default that we
		// use when creating an invoice. We do not assume the default of
		// 9 blocks that is defined in BOLT-11, because this is never
		// enough for other lnd nodes.
		payIntent.cltvDelta = uint16(r.cfg.Bitcoin.TimeLockDelta)
	}

	// Do bounds checking with the block padding so the router isn't left
	// with a zombie payment in case the user messes up.
	err = routing.ValidateCLTVLimit(
		payIntent.cltvLimit, payIntent.cltvDelta, true,
	)
	if err != nil {
		return payIntent, err
	}

	// If the user is manually specifying payment details, then the payment
	// hash may be encoded as a string.
	switch {
	case rpcPayReq.PaymentHashString != "":
		paymentHash, err := hex.DecodeString(
			rpcPayReq.PaymentHashString,
		)
		if err != nil {
			return payIntent, err
		}

		copy(payIntent.rHash[:], paymentHash)

	default:
		copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
	}

	// Unmarshal any custom destination features.
	payIntent.destFeatures, err = routerrpc.UnmarshalFeatures(
		rpcPayReq.DestFeatures,
	)
	if err != nil {
		return payIntent, err
	}

	return payIntent, nil
}

type paymentIntentResponse struct {
	Route    *route.Route
	Preimage [32]byte
	Err      error
}

// dispatchPaymentIntent attempts to fully dispatch an RPC payment intent.
// We'll either pass the payment as a whole to the channel router, or give it a
// pre-built route. The first error this method returns denotes if we were
// unable to save the payment. The second error returned denotes if the payment
// didn't succeed.
func (r *rpcServer) dispatchPaymentIntent(
	payIntent *rpcPaymentIntent) (*paymentIntentResponse, error) {

	// Construct a payment request to send to the channel router. If the
	// payment is successful, the route chosen will be returned. Otherwise,
	// we'll get a non-nil error.
	var (
		preImage  [32]byte
		route     *route.Route
		routerErr error
	)

	// If a route was specified, then we'll pass the route directly to the
	// router, otherwise we'll create a payment session to execute it.
	if payIntent.route == nil {
		payment := &routing.LightningPayment{
			Target:             payIntent.dest,
			Amount:             payIntent.msat,
			FinalCLTVDelta:     payIntent.cltvDelta,
			FeeLimit:           payIntent.feeLimit,
			CltvLimit:          payIntent.cltvLimit,
			RouteHints:         payIntent.routeHints,
			OutgoingChannelIDs: payIntent.outgoingChannelIDs,
			LastHop:            payIntent.lastHop,
			PaymentRequest:     payIntent.payReq,
			PayAttemptTimeout:  routing.DefaultPayAttemptTimeout,
			DestCustomRecords:  payIntent.destCustomRecords,
			DestFeatures:       payIntent.destFeatures,
			PaymentAddr:        payIntent.paymentAddr,
			Metadata:           payIntent.metadata,
			BlindedPathSet:     payIntent.blindedPathSet,

			// Don't enable multi-part payments on the main rpc.
			// Users need to use routerrpc for that.
			MaxParts: 1,
		}
		err := payment.SetPaymentHash(payIntent.rHash)
		if err != nil {
			return nil, err
		}

		preImage, route, routerErr = r.server.chanRouter.SendPayment(
			payment,
		)
	} else {
		var attempt *channeldb.HTLCAttempt
		attempt, routerErr = r.server.chanRouter.SendToRoute(
			payIntent.rHash, payIntent.route, nil,
		)

		if routerErr == nil {
			preImage = attempt.Settle.Preimage
		}

		route = payIntent.route
	}

	// If the route failed, then we'll return a nil save err, but a non-nil
	// routing err.
	if routerErr != nil {
		rpcsLog.Warnf("Unable to send payment: %v", routerErr)

		return &paymentIntentResponse{
			Err: routerErr,
		}, nil
	}

	return &paymentIntentResponse{
		Route:    route,
		Preimage: preImage,
	}, nil
}
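
// Callers of dispatchPaymentIntent have to distinguish the hard "could not
// save or dispatch" error from the soft in-protocol payment failure carried
// in the response. The sketch below makes that contract explicit; it is
// illustrative only (not part of lnd) and the name handleIntent is a
// hypothetical helper for the example.
func handleIntent(r *rpcServer,
	intent *rpcPaymentIntent) (*lnrpc.SendResponse, error) {

	resp, saveErr := r.dispatchPaymentIntent(intent)
	if saveErr != nil {
		// Hard failure: terminate the stream, as sendPayment does.
		return nil, saveErr
	}

	if resp.Err != nil {
		// Soft failure: report it to the client and keep the stream
		// alive.
		return &lnrpc.SendResponse{
			PaymentError: resp.Err.Error(),
			PaymentHash:  intent.rHash[:],
		}, nil
	}

	// Success: hand back the preimage that proves the payment settled.
	return &lnrpc.SendResponse{
		PaymentPreimage: resp.Preimage[:],
		PaymentHash:     intent.rHash[:],
	}, nil
}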

// sendPayment takes a paymentStream (a source of pre-built routes or payment
// requests) and continually attempts to dispatch payment requests written to
// the write end of the stream. Responses will also be streamed back to the
// client via the write end of the stream. This method is used by both
// SendToRoute and SendPayment as the logic is virtually identical.
func (r *rpcServer) sendPayment(stream *paymentStream) error {
	payChan := make(chan *rpcPaymentIntent)
	errChan := make(chan error, 1)

	// We don't allow payments to be sent while the daemon itself is still
	// syncing as we may be trying to send a payment over a "stale"
	// channel.
	if !r.server.Started() {
		return ErrServerNotActive
	}

	// TODO(roasbeef): check payment filter to see if already used?

	// In order to limit the level of concurrency and prevent a client from
	// attempting to OOM the server, we'll set up a semaphore to create an
	// upper ceiling on the number of outstanding payments.
	const numOutstandingPayments = 2000
	htlcSema := make(chan struct{}, numOutstandingPayments)
	for i := 0; i < numOutstandingPayments; i++ {
		htlcSema <- struct{}{}
	}

	// We keep track of the running goroutines and set up a quit signal we
	// can use to request them to exit if the method returns because of an
	// encountered error.
	var wg sync.WaitGroup
	reqQuit := make(chan struct{})
	defer close(reqQuit)

	// Launch a new goroutine to handle reading new payment requests from
	// the client. This way we can handle errors independently of blocking
	// and waiting for the next payment request to come through.
	// TODO(joostjager): Callers expect results to come in in the same order
	// as the requests were sent, but this is far from guaranteed in the
	// code below.
	wg.Add(1)
	go func() {
		defer wg.Done()

		for {
			select {
			case <-reqQuit:
				return

			default:
				// Receive the next pending payment within the
				// stream sent by the client. If we read the
				// EOF sentinel, then the client has closed the
				// stream, and we can exit normally.
				nextPayment, err := stream.recv()
				if err == io.EOF {
					close(payChan)
					return
				} else if err != nil {
					rpcsLog.Errorf("Failed receiving from "+
						"stream: %v", err)

					select {
					case errChan <- err:
					default:
					}
					return
				}

				// Populate the next payment, either from the
				// payment request, or from the explicitly set
				// fields. If the payment proto wasn't well
				// formed, then we'll send an error reply and
				// wait for the next payment.
				payIntent, err := r.extractPaymentIntent(
					nextPayment,
				)
				if err != nil {
					if err := stream.send(&lnrpc.SendResponse{
						PaymentError: err.Error(),
						PaymentHash:  payIntent.rHash[:],
					}); err != nil {
						rpcsLog.Errorf("Failed "+
							"sending on "+
							"stream: %v", err)

						select {
						case errChan <- err:
						default:
						}
						return
					}
					continue
				}

				// If the payment was well formed, then we'll
				// send it to the dispatch goroutine, or exit,
				// whichever comes first.
				select {
				case payChan <- &payIntent:
				case <-reqQuit:
					return
				}
			}
		}
	}()

sendLoop:
	for {
		select {

		// If we encounter an error either during sending or
		// receiving, we return directly, closing the stream.
		case err := <-errChan:
			return err

		case <-r.quit:
			return errors.New("rpc server shutting down")

		case payIntent, ok := <-payChan:
			// If the receive loop is done, we break the send loop
			// and wait for the ongoing payments to finish before
			// exiting.
			if !ok {
				break sendLoop
			}

			// We launch a new goroutine to execute the current
			// payment so we can continue to serve requests while
			// this payment is being dispatched.
			wg.Add(1)
			go func(payIntent *rpcPaymentIntent) {
				defer wg.Done()

				// Attempt to grab a free semaphore slot, using
				// a defer to eventually release the slot
				// regardless of payment success.
				select {
				case <-htlcSema:
				case <-reqQuit:
					return
				}
				defer func() {
					htlcSema <- struct{}{}
				}()

				resp, saveErr := r.dispatchPaymentIntent(
					payIntent,
				)

				switch {
				// If we were unable to save the state of the
				// payment, then we'll return the error to the
				// user, and terminate.
				case saveErr != nil:
					rpcsLog.Errorf("Failed dispatching "+
						"payment intent: %v", saveErr)

					select {
					case errChan <- saveErr:
					default:
					}
					return

				// If we receive a payment error then, instead
				// of terminating the stream, we send the error
				// response to the user.
				case resp.Err != nil:
					err := stream.send(&lnrpc.SendResponse{
						PaymentError: resp.Err.Error(),
3✔
5892
                                                PaymentHash:  payIntent.rHash[:],
3✔
5893
                                        })
3✔
5894
                                        if err != nil {
3✔
5895
                                                rpcsLog.Errorf("Failed "+
×
5896
                                                        "sending error "+
×
5897
                                                        "response: %v", err)
×
5898

×
5899
                                                select {
×
5900
                                                case errChan <- err:
×
5901
                                                default:
×
5902
                                                }
5903
                                        }
5904
                                        return
3✔
5905
                                }
5906

5907
                                backend := r.routerBackend
3✔
5908
                                marshalledRouted, err := backend.MarshallRoute(
3✔
5909
                                        resp.Route,
3✔
5910
                                )
3✔
5911
                                if err != nil {
3✔
5912
                                        errChan <- err
×
5913
                                        return
×
5914
                                }
×
5915

5916
                                err = stream.send(&lnrpc.SendResponse{
3✔
5917
                                        PaymentHash:     payIntent.rHash[:],
3✔
5918
                                        PaymentPreimage: resp.Preimage[:],
3✔
5919
                                        PaymentRoute:    marshalledRouted,
3✔
5920
                                })
3✔
5921
                                if err != nil {
3✔
5922
                                        rpcsLog.Errorf("Failed sending "+
×
5923
                                                "response: %v", err)
×
5924

×
5925
                                        select {
×
5926
                                        case errChan <- err:
×
5927
                                        default:
×
5928
                                        }
5929
                                        return
×
5930
                                }
5931
                        }(payIntent)
5932
                }
5933
        }
5934

5935
        // Wait for all goroutines to finish before closing the stream.
5936
        wg.Wait()
×
5937
        return nil
×
5938
}
5939

5940
// SendPaymentSync is the synchronous non-streaming version of SendPayment.
// This RPC is intended to be consumed by clients of the REST proxy.
// Additionally, this RPC expects the destination's public key and the payment
// hash (if any) to be encoded as hex strings.
func (r *rpcServer) SendPaymentSync(ctx context.Context,
        nextPayment *lnrpc.SendRequest) (*lnrpc.SendResponse, error) {

        return r.sendPaymentSync(&rpcPaymentRequest{
                SendRequest: nextPayment,
        })
}

// SendToRouteSync is the synchronous non-streaming version of SendToRoute.
// This RPC is intended to be consumed by clients of the REST proxy.
// Additionally, this RPC expects the payment hash (if any) to be encoded as
// hex strings.
func (r *rpcServer) SendToRouteSync(ctx context.Context,
        req *lnrpc.SendToRouteRequest) (*lnrpc.SendResponse, error) {

        if req.Route == nil {
                return nil, fmt.Errorf("unable to send, no routes provided")
        }

        paymentRequest, err := r.unmarshallSendToRouteRequest(req)
        if err != nil {
                return nil, err
        }

        return r.sendPaymentSync(paymentRequest)
}

// sendPaymentSync is the synchronous variant of sendPayment. It will block and
// wait until the payment has been fully completed.
func (r *rpcServer) sendPaymentSync(
        nextPayment *rpcPaymentRequest) (*lnrpc.SendResponse, error) {

        // We don't allow payments to be sent while the daemon itself is still
        // syncing as we may be trying to send a payment over a "stale"
        // channel.
        if !r.server.Started() {
                return nil, ErrServerNotActive
        }

        // First we'll attempt to map the proto describing the next payment to
        // an intent that we can pass to local sub-systems.
        payIntent, err := r.extractPaymentIntent(nextPayment)
        if err != nil {
                return nil, err
        }

        // With the payment validated, we'll now attempt to dispatch the
        // payment.
        resp, saveErr := r.dispatchPaymentIntent(&payIntent)
        switch {
        case saveErr != nil:
                return nil, saveErr

        case resp.Err != nil:
                return &lnrpc.SendResponse{
                        PaymentError: resp.Err.Error(),
                        PaymentHash:  payIntent.rHash[:],
                }, nil
        }

        rpcRoute, err := r.routerBackend.MarshallRoute(resp.Route)
        if err != nil {
                return nil, err
        }

        return &lnrpc.SendResponse{
                PaymentHash:     payIntent.rHash[:],
                PaymentPreimage: resp.Preimage[:],
                PaymentRoute:    rpcRoute,
        }, nil
}

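// exampleSendPaymentSyncUsage is an illustrative sketch and not part of the
// original rpcserver.go: it shows how a caller of the synchronous send path
// above might separate an RPC-level failure from a payment-level failure,
// which is reported in-band via the PaymentError field. The client value is a
// hypothetical, already-connected lnrpc.LightningClient and payReq is a
// placeholder BOLT 11 invoice string.
func exampleSendPaymentSyncUsage(ctx context.Context,
        client lnrpc.LightningClient, payReq string) error {

        resp, err := client.SendPaymentSync(ctx, &lnrpc.SendRequest{
                PaymentRequest: payReq,
        })
        if err != nil {
                // RPC-level failure, e.g. the server is still syncing.
                return err
        }
        if resp.PaymentError != "" {
                // Payment-level failure, e.g. no route could be found.
                return fmt.Errorf("payment failed: %v", resp.PaymentError)
        }

        rpcsLog.Infof("Payment settled, preimage=%x", resp.PaymentPreimage)
        return nil
}
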
// AddInvoice attempts to add a new invoice to the invoice database. Any
// duplicated invoices are rejected, therefore all invoices *must* have a
// unique payment preimage.
func (r *rpcServer) AddInvoice(ctx context.Context,
        invoice *lnrpc.Invoice) (*lnrpc.AddInvoiceResponse, error) {

        var (
                defaultDelta = r.cfg.Bitcoin.TimeLockDelta
                blindCfg     = invoice.BlindedPathConfig
                blind        = invoice.IsBlinded
        )

        globalBlindCfg := r.server.cfg.Routing.BlindedPaths
        blindingRestrictions := &routing.BlindedPathRestrictions{
                MinDistanceFromIntroNode: globalBlindCfg.MinNumRealHops,
                NumHops:                  globalBlindCfg.NumHops,
                MaxNumPaths:              globalBlindCfg.MaxNumPaths,
                NodeOmissionSet:          fn.NewSet[route.Vertex](),
        }

        if blindCfg != nil && !blind {
                return nil, fmt.Errorf("blinded path config provided but " +
                        "IsBlinded not set")
        }

        if blind && blindCfg != nil {
                if blindCfg.MinNumRealHops != nil {
                        blindingRestrictions.MinDistanceFromIntroNode =
                                uint8(*blindCfg.MinNumRealHops)
                }
                if blindCfg.NumHops != nil {
                        blindingRestrictions.NumHops = uint8(*blindCfg.NumHops)
                }
                if blindCfg.MaxNumPaths != nil {
                        blindingRestrictions.MaxNumPaths =
                                uint8(*blindCfg.MaxNumPaths)
                }

                for _, nodeIDBytes := range blindCfg.NodeOmissionList {
                        vertex, err := route.NewVertexFromBytes(nodeIDBytes)
                        if err != nil {
                                return nil, err
                        }

                        blindingRestrictions.NodeOmissionSet.Add(vertex)
                }
        }

        if blindingRestrictions.MinDistanceFromIntroNode >
                blindingRestrictions.NumHops {

                return nil, fmt.Errorf("the minimum number of real " +
                        "hops in a blinded path must be smaller than " +
                        "or equal to the number of hops expected to " +
                        "be included in each path")
        }

        addInvoiceCfg := &invoicesrpc.AddInvoiceConfig{
                AddInvoice:        r.server.invoices.AddInvoice,
                IsChannelActive:   r.server.htlcSwitch.HasActiveLink,
                ChainParams:       r.cfg.ActiveNetParams.Params,
                NodeSigner:        r.server.nodeSigner,
                DefaultCLTVExpiry: defaultDelta,
                ChanDB:            r.server.chanStateDB,
                Graph:             r.server.graphDB,
                GenInvoiceFeatures: func() *lnwire.FeatureVector {
                        v := r.server.featureMgr.Get(feature.SetInvoice)

                        if blind {
                                // If an invoice includes blinded paths, then a
                                // payment address is not required since we use
                                // the PathID in the final hop's encrypted data
                                // as equivalent to the payment address
                                v.Unset(lnwire.PaymentAddrRequired)
                                v.Set(lnwire.PaymentAddrOptional)

                                // The invoice payer will also need to
                                // understand the new BOLT 11 tagged field
                                // containing the blinded path, so we switch
                                // the bit to required.
                                v = feature.SetBit(
                                        v, lnwire.Bolt11BlindedPathsRequired,
                                )
                        }

                        return v
                },
                GenAmpInvoiceFeatures: func() *lnwire.FeatureVector {
                        return r.server.featureMgr.Get(feature.SetInvoiceAmp)
                },
                GetAlias:   r.server.aliasMgr.GetPeerAlias,
                BestHeight: r.server.cc.BestBlockTracker.BestHeight,
                QueryBlindedRoutes: func(amt lnwire.MilliSatoshi) (
                        []*route.Route, error) {

                        return r.server.chanRouter.FindBlindedPaths(
                                r.selfNode, amt,
                                r.server.defaultMC.GetProbability,
                                blindingRestrictions,
                        )
                },
        }

        value, err := lnrpc.UnmarshallAmt(invoice.Value, invoice.ValueMsat)
        if err != nil {
                return nil, err
        }

        // Convert the passed routing hints to the required format.
        routeHints, err := invoicesrpc.CreateZpay32HopHints(invoice.RouteHints)
        if err != nil {
                return nil, err
        }

        var blindedPathCfg *invoicesrpc.BlindedPathConfig
        if blind {
                bpConfig := r.server.cfg.Routing.BlindedPaths

                blindedPathCfg = &invoicesrpc.BlindedPathConfig{
                        RoutePolicyIncrMultiplier: bpConfig.
                                PolicyIncreaseMultiplier,
                        RoutePolicyDecrMultiplier: bpConfig.
                                PolicyDecreaseMultiplier,
                        DefaultDummyHopPolicy: &blindedpath.BlindedHopPolicy{
                                CLTVExpiryDelta: uint16(defaultDelta),
                                FeeRate: uint32(
                                        r.server.cfg.Bitcoin.FeeRate,
                                ),
                                BaseFee:     r.server.cfg.Bitcoin.BaseFee,
                                MinHTLCMsat: r.server.cfg.Bitcoin.MinHTLCIn,

                                // MaxHTLCMsat will be calculated on the fly by
                                // using the introduction node's channel's
                                // capacities.
                                MaxHTLCMsat: 0,
                        },
                        MinNumPathHops: blindingRestrictions.NumHops,
                }
        }

        addInvoiceData := &invoicesrpc.AddInvoiceData{
                Memo:            invoice.Memo,
                Value:           value,
                DescriptionHash: invoice.DescriptionHash,
                Expiry:          invoice.Expiry,
                FallbackAddr:    invoice.FallbackAddr,
                CltvExpiry:      invoice.CltvExpiry,
                Private:         invoice.Private,
                RouteHints:      routeHints,
                Amp:             invoice.IsAmp,
                BlindedPathCfg:  blindedPathCfg,
        }

        if invoice.RPreimage != nil {
                preimage, err := lntypes.MakePreimage(invoice.RPreimage)
                if err != nil {
                        return nil, err
                }
                addInvoiceData.Preimage = &preimage
        }

        hash, dbInvoice, err := invoicesrpc.AddInvoice(
                ctx, addInvoiceCfg, addInvoiceData,
        )
        if err != nil {
                return nil, err
        }

        return &lnrpc.AddInvoiceResponse{
                AddIndex:       dbInvoice.AddIndex,
                PaymentRequest: string(dbInvoice.PaymentRequest),
                RHash:          hash[:],
                PaymentAddr:    dbInvoice.Terms.PaymentAddr[:],
        }, nil
}

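// exampleAddInvoiceUsage is an illustrative sketch and not part of the
// original file: it shows the minimal AddInvoice call corresponding to the
// handler above, requesting a 1000 satoshi invoice with a memo. The client
// value is a hypothetical lnrpc.LightningClient obtained elsewhere.
func exampleAddInvoiceUsage(ctx context.Context,
        client lnrpc.LightningClient) (string, error) {

        resp, err := client.AddInvoice(ctx, &lnrpc.Invoice{
                Memo:  "coffee",
                Value: 1000,
        })
        if err != nil {
                return "", err
        }

        // The returned BOLT 11 payment request encodes the payment hash,
        // amount and memo.
        return resp.PaymentRequest, nil
}
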
// LookupInvoice attempts to look up an invoice according to its payment hash.
// The passed payment hash *must* be exactly 32 bytes, if not an error is
// returned.
func (r *rpcServer) LookupInvoice(ctx context.Context,
        req *lnrpc.PaymentHash) (*lnrpc.Invoice, error) {

        var (
                payHash [32]byte
                rHash   []byte
                err     error
        )

        // If the RHash as a raw string was provided, then decode that and use
        // that directly. Otherwise, we use the raw bytes provided.
        if req.RHashStr != "" {
                rHash, err = hex.DecodeString(req.RHashStr)
                if err != nil {
                        return nil, err
                }
        } else {
                rHash = req.RHash
        }

        // Ensure that the payment hash is *exactly* 32-bytes.
        if len(rHash) != 0 && len(rHash) != 32 {
                return nil, fmt.Errorf("payment hash must be exactly "+
                        "32 bytes, is instead %v", len(rHash))
        }
        copy(payHash[:], rHash)

        rpcsLog.Tracef("[lookupinvoice] searching for invoice %x", payHash[:])

        invoice, err := r.server.invoices.LookupInvoice(ctx, payHash)
        switch {
        case errors.Is(err, invoices.ErrInvoiceNotFound):
                return nil, status.Error(codes.NotFound, err.Error())
        case err != nil:
                return nil, err
        }

        rpcsLog.Tracef("[lookupinvoice] located invoice %v",
                lnutils.SpewLogClosure(invoice))

        rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
                &invoice, r.cfg.ActiveNetParams.Params,
        )
        if err != nil {
                return nil, err
        }

        // Give the aux data parser a chance to format the custom data in the
        // invoice HTLCs.
        err = fn.MapOptionZ(
                r.server.implCfg.AuxDataParser,
                func(parser AuxDataParser) error {
                        return parser.InlineParseCustomData(rpcInvoice)
                },
        )
        if err != nil {
                return nil, fmt.Errorf("error parsing custom data: %w",
                        err)
        }

        return rpcInvoice, nil
}

// ListInvoices returns a list of all the invoices currently stored within the
// database. Any active debug invoices are ignored.
func (r *rpcServer) ListInvoices(ctx context.Context,
        req *lnrpc.ListInvoiceRequest) (*lnrpc.ListInvoiceResponse, error) {

        // If the number of invoices was not specified, then we'll default to
        // returning the latest 100 invoices.
        if req.NumMaxInvoices == 0 {
                req.NumMaxInvoices = 100
        }

        // If both dates are set, we check that the start date is less than the
        // end date, otherwise we'll get an empty result.
        if req.CreationDateStart != 0 && req.CreationDateEnd != 0 {
                if req.CreationDateStart >= req.CreationDateEnd {
                        return nil, fmt.Errorf("start date(%v) must be before "+
                                "end date(%v)", req.CreationDateStart,
                                req.CreationDateEnd)
                }
        }

        // Next, we'll map the proto request into a format that is understood
        // by the database.
        q := invoices.InvoiceQuery{
                IndexOffset:       req.IndexOffset,
                NumMaxInvoices:    req.NumMaxInvoices,
                PendingOnly:       req.PendingOnly,
                Reversed:          req.Reversed,
                CreationDateStart: int64(req.CreationDateStart),
                CreationDateEnd:   int64(req.CreationDateEnd),
        }

        invoiceSlice, err := r.server.invoicesDB.QueryInvoices(ctx, q)
        if err != nil {
                return nil, fmt.Errorf("unable to query invoices: %w", err)
        }

        // Before returning the response, we'll need to convert each invoice
        // into its proto representation.
        resp := &lnrpc.ListInvoiceResponse{
                Invoices:         make([]*lnrpc.Invoice, len(invoiceSlice.Invoices)),
                FirstIndexOffset: invoiceSlice.FirstIndexOffset,
                LastIndexOffset:  invoiceSlice.LastIndexOffset,
        }
        for i, invoice := range invoiceSlice.Invoices {
                invoice := invoice
                resp.Invoices[i], err = invoicesrpc.CreateRPCInvoice(
                        &invoice, r.cfg.ActiveNetParams.Params,
                )
                if err != nil {
                        return nil, err
                }

                // Give the aux data parser a chance to format the custom data
                // in the invoice HTLCs.
                err = fn.MapOptionZ(
                        r.server.implCfg.AuxDataParser,
                        func(parser AuxDataParser) error {
                                return parser.InlineParseCustomData(
                                        resp.Invoices[i],
                                )
                        },
                )
                if err != nil {
                        return nil, fmt.Errorf("error parsing custom data: %w",
                                err)
                }
        }

        return resp, nil
}

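// exampleListInvoicesPaging is an illustrative sketch and not part of the
// original file: it pages through invoices using the IndexOffset /
// LastIndexOffset cursor returned by the handler above, 100 entries at a
// time. The client value is a hypothetical lnrpc.LightningClient.
func exampleListInvoicesPaging(ctx context.Context,
        client lnrpc.LightningClient) ([]*lnrpc.Invoice, error) {

        var (
                all    []*lnrpc.Invoice
                offset uint64
        )
        for {
                resp, err := client.ListInvoices(ctx, &lnrpc.ListInvoiceRequest{
                        IndexOffset:    offset,
                        NumMaxInvoices: 100,
                })
                if err != nil {
                        return nil, err
                }
                all = append(all, resp.Invoices...)

                // An empty page means we've reached the end of the index.
                if len(resp.Invoices) == 0 {
                        break
                }
                offset = resp.LastIndexOffset
        }

        return all, nil
}
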
// SubscribeInvoices returns a uni-directional stream (server -> client) for
// notifying the client of newly added/settled invoices.
func (r *rpcServer) SubscribeInvoices(req *lnrpc.InvoiceSubscription,
        updateStream lnrpc.Lightning_SubscribeInvoicesServer) error {

        invoiceClient, err := r.server.invoices.SubscribeNotifications(
                updateStream.Context(), req.AddIndex, req.SettleIndex,
        )
        if err != nil {
                return err
        }
        defer invoiceClient.Cancel()

        for {
                select {
                case newInvoice := <-invoiceClient.NewInvoices:
                        rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
                                newInvoice, r.cfg.ActiveNetParams.Params,
                        )
                        if err != nil {
                                return err
                        }

                        // Give the aux data parser a chance to format the
                        // custom data in the invoice HTLCs.
                        err = fn.MapOptionZ(
                                r.server.implCfg.AuxDataParser,
                                func(parser AuxDataParser) error {
                                        return parser.InlineParseCustomData(
                                                rpcInvoice,
                                        )
                                },
                        )
                        if err != nil {
                                return fmt.Errorf("error parsing custom data: "+
                                        "%w", err)
                        }

                        if err := updateStream.Send(rpcInvoice); err != nil {
                                return err
                        }

                case settledInvoice := <-invoiceClient.SettledInvoices:
                        rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
                                settledInvoice, r.cfg.ActiveNetParams.Params,
                        )
                        if err != nil {
                                return err
                        }

                        // Give the aux data parser a chance to format the
                        // custom data in the invoice HTLCs.
                        err = fn.MapOptionZ(
                                r.server.implCfg.AuxDataParser,
                                func(parser AuxDataParser) error {
                                        return parser.InlineParseCustomData(
                                                rpcInvoice,
                                        )
                                },
                        )
                        if err != nil {
                                return fmt.Errorf("error parsing custom data: "+
                                        "%w", err)
                        }

                        if err := updateStream.Send(rpcInvoice); err != nil {
                                return err
                        }

                // The response stream's context for whatever reason has been
                // closed. If context is closed by an exceeded deadline we will
                // return an error.
                case <-updateStream.Context().Done():
                        if errors.Is(updateStream.Context().Err(), context.Canceled) {
                                return nil
                        }
                        return updateStream.Context().Err()

                case <-r.quit:
                        return nil
                }
        }
}

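// exampleSubscribeInvoicesUsage is an illustrative sketch and not part of the
// original file: it drains the server->client stream produced by the handler
// above, resuming from a previously seen settle index. The client value is a
// hypothetical lnrpc.LightningClient; the loop only ends when the stream is
// torn down with an error.
func exampleSubscribeInvoicesUsage(ctx context.Context,
        client lnrpc.LightningClient, lastSettleIndex uint64) error {

        stream, err := client.SubscribeInvoices(ctx, &lnrpc.InvoiceSubscription{
                SettleIndex: lastSettleIndex,
        })
        if err != nil {
                return err
        }

        for {
                invoice, err := stream.Recv()
                if err != nil {
                        // io.EOF or a context error ends the subscription.
                        return err
                }

                // Remember the settle index so the subscription can be
                // resumed without missing settled invoices.
                if invoice.State == lnrpc.Invoice_SETTLED {
                        lastSettleIndex = invoice.SettleIndex
                }
        }
}
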
// SubscribeTransactions creates a uni-directional stream (server -> client) in
// which any newly discovered transactions relevant to the wallet are sent
// over.
func (r *rpcServer) SubscribeTransactions(req *lnrpc.GetTransactionsRequest,
        updateStream lnrpc.Lightning_SubscribeTransactionsServer) error {

        txClient, err := r.server.cc.Wallet.SubscribeTransactions()
        if err != nil {
                return err
        }
        defer txClient.Cancel()
        rpcsLog.Infof("New transaction subscription")

        for {
                select {
                case tx := <-txClient.ConfirmedTransactions():
                        detail := lnrpc.RPCTransaction(tx)
                        if err := updateStream.Send(detail); err != nil {
                                return err
                        }

                case tx := <-txClient.UnconfirmedTransactions():
                        detail := lnrpc.RPCTransaction(tx)
                        if err := updateStream.Send(detail); err != nil {
                                return err
                        }

                // The response stream's context for whatever reason has been
                // closed. If context is closed by an exceeded deadline we will
                // return an error.
                case <-updateStream.Context().Done():
                        rpcsLog.Infof("Canceling transaction subscription")
                        if errors.Is(updateStream.Context().Err(), context.Canceled) {
                                return nil
                        }
                        return updateStream.Context().Err()

                case <-r.quit:
                        return nil
                }
        }
}

// GetTransactions returns a list describing all the known transactions
// relevant to the wallet.
func (r *rpcServer) GetTransactions(ctx context.Context,
        req *lnrpc.GetTransactionsRequest) (*lnrpc.TransactionDetails, error) {

        // To remain backwards compatible with the old api, default to the
        // special case end height which will return transactions from the start
        // height until the chain tip, including unconfirmed transactions.
        var endHeight = btcwallet.UnconfirmedHeight

        // If the user has provided an end height, we overwrite our default.
        if req.EndHeight != 0 {
                endHeight = req.EndHeight
        }

        txns, firstIdx, lastIdx, err :=
                r.server.cc.Wallet.ListTransactionDetails(
                        req.StartHeight, endHeight, req.Account,
                        req.IndexOffset, req.MaxTransactions,
                )
        if err != nil {
                return nil, err
        }

        return lnrpc.RPCTransactionDetails(txns, firstIdx, lastIdx), nil
}

// DescribeGraph returns a description of the latest graph state from the PoV
// of the node. The graph information is partitioned into two components: all
// the nodes/vertexes, and all the edges that connect the vertexes themselves.
// As this is a directed graph, the edges also contain the node directional
// specific routing policy which includes: the time lock delta, fee
// information, etc.
func (r *rpcServer) DescribeGraph(ctx context.Context,
        req *lnrpc.ChannelGraphRequest) (*lnrpc.ChannelGraph, error) {

        resp := &lnrpc.ChannelGraph{}
        includeUnannounced := req.IncludeUnannounced

        // Check to see if the cache is already populated, if so then we can
        // just return it directly.
        //
        // TODO(roasbeef): move this to an interceptor level feature?
        graphCacheActive := r.cfg.Caches.RPCGraphCacheDuration != 0
        if graphCacheActive {
                r.graphCache.Lock()
                defer r.graphCache.Unlock()

                if r.describeGraphResp != nil {
                        return r.describeGraphResp, nil
                }
        }

        // Obtain the pointer to the global singleton channel graph, this will
        // provide a consistent view of the graph due to bolt db's
        // transactional model.
        graph := r.server.graphDB

        // First iterate through all the known nodes (connected or unconnected
        // within the graph), collating their current state into the RPC
        // response.
        err := graph.ForEachNode(func(nodeTx graphdb.NodeRTx) error {
                lnNode := marshalNode(nodeTx.Node())

                resp.Nodes = append(resp.Nodes, lnNode)

                return nil
        })
        if err != nil {
                return nil, err
        }

        // Next, for each active channel we know of within the graph, create a
        // similar response which details both the edge information as well as
        // the routing policies of the nodes connecting the two edges.
        err = graph.ForEachChannel(func(edgeInfo *models.ChannelEdgeInfo,
                c1, c2 *models.ChannelEdgePolicy) error {

                // Do not include unannounced channels unless specifically
                // requested. Unannounced channels include both private channels as
                // well as public channels whose authentication proofs were not
                // confirmed yet, hence were not announced.
                if !includeUnannounced && edgeInfo.AuthProof == nil {
                        return nil
                }

                edge := marshalDBEdge(edgeInfo, c1, c2)
                resp.Edges = append(resp.Edges, edge)

                return nil
        })
        if err != nil && !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
                return nil, err
        }

        // We still have the mutex held, so we can safely populate the cache
        // now to save on GC churn for this query, but only if the cache isn't
        // disabled.
        if graphCacheActive {
                r.describeGraphResp = resp
        }

        return resp, nil
}

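// exampleDescribeGraphUsage is an illustrative sketch and not part of the
// original file: it fetches the public view of the graph via the handler
// above and tallies a simple per-node channel count. The client value is a
// hypothetical lnrpc.LightningClient.
func exampleDescribeGraphUsage(ctx context.Context,
        client lnrpc.LightningClient) (map[string]int, error) {

        graph, err := client.DescribeGraph(ctx, &lnrpc.ChannelGraphRequest{
                IncludeUnannounced: false,
        })
        if err != nil {
                return nil, err
        }

        // Each edge contributes one channel to both of its endpoints.
        counts := make(map[string]int, len(graph.Nodes))
        for _, edge := range graph.Edges {
                counts[edge.Node1Pub]++
                counts[edge.Node2Pub]++
        }

        return counts, nil
}
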
// marshalExtraOpaqueData marshals the given tlv data. If the tlv stream is
// malformed or empty, an empty map is returned. This makes the method safe to
// use on unvalidated data.
func marshalExtraOpaqueData(data []byte) map[uint64][]byte {
        r := bytes.NewReader(data)

        tlvStream, err := tlv.NewStream()
        if err != nil {
                return nil
        }

        // Since ExtraOpaqueData is provided by a potentially malicious peer,
        // pass it into the P2P decoding variant.
        parsedTypes, err := tlvStream.DecodeWithParsedTypesP2P(r)
        if err != nil || len(parsedTypes) == 0 {
                return nil
        }

        records := make(map[uint64][]byte)
        for k, v := range parsedTypes {
                records[uint64(k)] = v
        }

        return records
}

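// exampleMarshalExtraOpaqueData is an illustrative sketch and not part of the
// original file: the raw bytes below encode a single odd TLV record
// (type=1, length=2, value=0xaabb), so the helper above should yield a map
// with one entry keyed by 1. The exact byte values are assumptions for
// illustration only.
func exampleMarshalExtraOpaqueData() {
        raw := []byte{0x01, 0x02, 0xaa, 0xbb}

        records := marshalExtraOpaqueData(raw)
        for typ, val := range records {
                rpcsLog.Debugf("tlv type=%d value=%x", typ, val)
        }
}
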
// extractInboundFeeSafe tries to extract the inbound fee from the given extra
// opaque data tlv block. If parsing fails, a zero inbound fee is returned. This
// function is typically used on unvalidated data stored in the database.
// There is not much we can do other than ignoring errors here.
func extractInboundFeeSafe(data lnwire.ExtraOpaqueData) lnwire.Fee {
        var inboundFee lnwire.Fee

        _, err := data.ExtractRecords(&inboundFee)
        if err != nil {
                // Return zero fee. Do not return the inboundFee variable
                // because it may be undefined.
                return lnwire.Fee{}
        }

        return inboundFee
}

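// exampleExtractInboundFee is an illustrative sketch and not part of the
// original file: it packs an inbound fee record into an extra-data blob and
// reads it back with the helper above, which should return the same values.
// The fee values are placeholders chosen for illustration.
func exampleExtractInboundFee() {
        fee := lnwire.Fee{
                BaseFee: -1000,
                FeeRate: -200,
        }

        var data lnwire.ExtraOpaqueData
        if err := data.PackRecords(&fee); err != nil {
                rpcsLog.Errorf("unable to pack inbound fee: %v", err)
                return
        }

        rpcsLog.Debugf("decoded inbound fee: %v", extractInboundFeeSafe(data))
}
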
func marshalDBEdge(edgeInfo *models.ChannelEdgeInfo,
        c1, c2 *models.ChannelEdgePolicy) *lnrpc.ChannelEdge {

        // Make sure the policies match the node they belong to. c1 should point
        // to the policy for NodeKey1, and c2 for NodeKey2.
        if c1 != nil && c1.ChannelFlags&lnwire.ChanUpdateDirection == 1 ||
                c2 != nil && c2.ChannelFlags&lnwire.ChanUpdateDirection == 0 {

                c2, c1 = c1, c2
        }

        var lastUpdate int64
        if c1 != nil {
                lastUpdate = c1.LastUpdate.Unix()
        }
        if c2 != nil && c2.LastUpdate.Unix() > lastUpdate {
                lastUpdate = c2.LastUpdate.Unix()
        }

        customRecords := marshalExtraOpaqueData(edgeInfo.ExtraOpaqueData)

        edge := &lnrpc.ChannelEdge{
                ChannelId: edgeInfo.ChannelID,
                ChanPoint: edgeInfo.ChannelPoint.String(),
                // TODO(roasbeef): update should be on edge info itself
                LastUpdate:    uint32(lastUpdate),
                Node1Pub:      hex.EncodeToString(edgeInfo.NodeKey1Bytes[:]),
                Node2Pub:      hex.EncodeToString(edgeInfo.NodeKey2Bytes[:]),
                Capacity:      int64(edgeInfo.Capacity),
                CustomRecords: customRecords,
        }

        if c1 != nil {
                edge.Node1Policy = marshalDBRoutingPolicy(c1)
        }

        if c2 != nil {
                edge.Node2Policy = marshalDBRoutingPolicy(c2)
        }

        return edge
}

func marshalDBRoutingPolicy(
        policy *models.ChannelEdgePolicy) *lnrpc.RoutingPolicy {

        disabled := policy.ChannelFlags&lnwire.ChanUpdateDisabled != 0

        customRecords := marshalExtraOpaqueData(policy.ExtraOpaqueData)
        inboundFee := extractInboundFeeSafe(policy.ExtraOpaqueData)

        return &lnrpc.RoutingPolicy{
                TimeLockDelta:    uint32(policy.TimeLockDelta),
                MinHtlc:          int64(policy.MinHTLC),
                MaxHtlcMsat:      uint64(policy.MaxHTLC),
                FeeBaseMsat:      int64(policy.FeeBaseMSat),
                FeeRateMilliMsat: int64(policy.FeeProportionalMillionths),
                Disabled:         disabled,
                LastUpdate:       uint32(policy.LastUpdate.Unix()),
                CustomRecords:    customRecords,

                InboundFeeBaseMsat:      inboundFee.BaseFee,
                InboundFeeRateMilliMsat: inboundFee.FeeRate,
        }
}

// GetNodeMetrics returns all available node metrics calculated from the
// current channel graph.
func (r *rpcServer) GetNodeMetrics(ctx context.Context,
        req *lnrpc.NodeMetricsRequest) (*lnrpc.NodeMetricsResponse, error) {

        // Get requested metric types.
        getCentrality := false
        for _, t := range req.Types {
                if t == lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY {
                        getCentrality = true
                }
        }

        // Only centrality can be requested for now.
        if !getCentrality {
                return nil, nil
        }

        resp := &lnrpc.NodeMetricsResponse{
                BetweennessCentrality: make(map[string]*lnrpc.FloatMetric),
        }

        // Obtain the pointer to the global singleton channel graph, this will
        // provide a consistent view of the graph due to bolt db's
        // transactional model.
        graph := r.server.graphDB

        // Calculate betweenness centrality if requested. Note that depending on the
        // graph size, this may take up to a few minutes.
        channelGraph := autopilot.ChannelGraphFromDatabase(graph)
        centralityMetric, err := autopilot.NewBetweennessCentralityMetric(
                runtime.NumCPU(),
        )
        if err != nil {
                return nil, err
        }
        if err := centralityMetric.Refresh(channelGraph); err != nil {
                return nil, err
        }

        // Fill normalized and non normalized centrality.
        centrality := centralityMetric.GetMetric(true)
        for nodeID, val := range centrality {
                resp.BetweennessCentrality[hex.EncodeToString(nodeID[:])] =
                        &lnrpc.FloatMetric{
                                NormalizedValue: val,
                        }
        }

        centrality = centralityMetric.GetMetric(false)
        for nodeID, val := range centrality {
                resp.BetweennessCentrality[hex.EncodeToString(nodeID[:])].Value = val
        }

        return resp, nil
}

// GetChanInfo returns the latest authenticated network announcement for the
// given channel identified by either its channel ID or a channel outpoint. Both
// uniquely identify the location of the transaction's funding output within the
// blockchain. The former is an 8-byte integer, while the latter is a string
// formatted as funding_txid:output_index.
func (r *rpcServer) GetChanInfo(_ context.Context,
        in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) {

        graph := r.server.graphDB

        var (
                edgeInfo     *models.ChannelEdgeInfo
                edge1, edge2 *models.ChannelEdgePolicy
                err          error
        )

        switch {
        case in.ChanId != 0:
                edgeInfo, edge1, edge2, err = graph.FetchChannelEdgesByID(
                        in.ChanId,
                )

        case in.ChanPoint != "":
                var chanPoint *wire.OutPoint
                chanPoint, err = wire.NewOutPointFromString(in.ChanPoint)
                if err != nil {
                        return nil, err
                }
                edgeInfo, edge1, edge2, err = graph.FetchChannelEdgesByOutpoint(
                        chanPoint,
                )

        default:
                return nil, fmt.Errorf("specify either chan_id or chan_point")
        }
        if err != nil {
                return nil, err
        }

        // Convert the database's edge format into the network/RPC edge format
        // which couples the edge itself along with the directional node
        // routing policies of each node involved within the channel.
        channelEdge := marshalDBEdge(edgeInfo, edge1, edge2)

        return channelEdge, nil
}

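// exampleGetChanInfoUsage is an illustrative sketch and not part of the
// original file: it queries the same channel once by its compact channel ID
// and once by its outpoint string, mirroring the two branches of the handler
// above. The client value is a hypothetical lnrpc.LightningClient and the
// channel ID is a placeholder.
func exampleGetChanInfoUsage(ctx context.Context,
        client lnrpc.LightningClient) error {

        // Lookup by the 8-byte compact channel ID.
        byID, err := client.GetChanInfo(ctx, &lnrpc.ChanInfoRequest{
                ChanId: 123456789,
        })
        if err != nil {
                return err
        }

        // Lookup by funding outpoint, formatted as funding_txid:output_index.
        byPoint, err := client.GetChanInfo(ctx, &lnrpc.ChanInfoRequest{
                ChanPoint: byID.ChanPoint,
        })
        if err != nil {
                return err
        }

        rpcsLog.Debugf("channel capacity=%d sat", byPoint.Capacity)
        return nil
}
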
// GetNodeInfo returns the latest advertised and aggregate authenticated
// channel information for the specified node identified by its public key.
func (r *rpcServer) GetNodeInfo(ctx context.Context,
        in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) {

        graph := r.server.graphDB

        // First, parse the hex-encoded public key into a full in-memory public
        // key object we can work with for querying.
        pubKey, err := route.NewVertexFromStr(in.PubKey)
        if err != nil {
                return nil, err
        }

        // With the public key decoded, attempt to fetch the node corresponding
        // to this public key. If the node cannot be found, then an error will
        // be returned.
        node, err := graph.FetchLightningNode(pubKey)
        switch {
        case errors.Is(err, graphdb.ErrGraphNodeNotFound):
                return nil, status.Error(codes.NotFound, err.Error())
        case err != nil:
                return nil, err
        }

        // With the node obtained, we'll now iterate through all its outgoing
        // edges to gather some basic statistics about its outgoing channels.
        var (
                numChannels   uint32
                totalCapacity btcutil.Amount
                channels      []*lnrpc.ChannelEdge
        )

        err = graph.ForEachNodeChannel(node.PubKeyBytes,
                func(_ kvdb.RTx, edge *models.ChannelEdgeInfo,
                        c1, c2 *models.ChannelEdgePolicy) error {

                        numChannels++
                        totalCapacity += edge.Capacity

                        // Only populate the node's channels if the user
                        // requested them.
                        if in.IncludeChannels {
                                // Do not include unannounced channels - private
                                // channels or public channels whose
                                // authentication proofs were not confirmed yet.
                                if edge.AuthProof == nil {
                                        return nil
                                }

                                // Convert the database's edge format into the
                                // network/RPC edge format.
                                channelEdge := marshalDBEdge(edge, c1, c2)
                                channels = append(channels, channelEdge)
                        }

                        return nil
                },
        )
        if err != nil {
                return nil, err
        }

        return &lnrpc.NodeInfo{
                Node:          marshalNode(node),
                NumChannels:   numChannels,
                TotalCapacity: int64(totalCapacity),
                Channels:      channels,
        }, nil
}

func marshalNode(node *models.LightningNode) *lnrpc.LightningNode {
        nodeAddrs := make([]*lnrpc.NodeAddress, len(node.Addresses))
        for i, addr := range node.Addresses {
                nodeAddr := &lnrpc.NodeAddress{
                        Network: addr.Network(),
                        Addr:    addr.String(),
                }
                nodeAddrs[i] = nodeAddr
        }

        features := invoicesrpc.CreateRPCFeatures(node.Features)

        customRecords := marshalExtraOpaqueData(node.ExtraOpaqueData)

        return &lnrpc.LightningNode{
                LastUpdate:    uint32(node.LastUpdate.Unix()),
                PubKey:        hex.EncodeToString(node.PubKeyBytes[:]),
                Addresses:     nodeAddrs,
                Alias:         node.Alias,
                Color:         graph.EncodeHexColor(node.Color),
                Features:      features,
                CustomRecords: customRecords,
        }
}

// QueryRoutes attempts to query the daemon's Channel Router for a possible
// route to a target destination capable of carrying a specific amount of
// satoshis within the route's flow. The returned route contains the full
// details required to craft and send an HTLC, also including the necessary
// information that should be present within the Sphinx packet encapsulated
// within the HTLC.
//
// TODO(roasbeef): should return a slice of routes in reality
//   - create separate PR to send based on well formatted route
func (r *rpcServer) QueryRoutes(ctx context.Context,
        in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) {

        return r.routerBackend.QueryRoutes(ctx, in)
}

// GetNetworkInfo returns some basic stats about the known channel graph from
// the PoV of the node.
func (r *rpcServer) GetNetworkInfo(ctx context.Context,
        _ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) {

        graph := r.server.graphDB

        var (
                numNodes             uint32
                numChannels          uint32
                maxChanOut           uint32
                totalNetworkCapacity btcutil.Amount
                minChannelSize       btcutil.Amount = math.MaxInt64
                maxChannelSize       btcutil.Amount
                medianChanSize       btcutil.Amount
        )

        // We'll use this map to de-duplicate channels during our traversal.
        // This is needed since channels are directional, so there will be two
        // edges for each channel within the graph.
        seenChans := make(map[uint64]struct{})

        // We also keep a list of all encountered capacities, in order to
        // calculate the median channel size.
        var allChans []btcutil.Amount

        // We'll run through all the known nodes within our view of the
        // network, tallying up the total number of nodes, and also gathering
        // each node so we can measure the graph diameter and degree stats
        // below.
        err := graph.ForEachNodeCached(func(node route.Vertex,
                edges map[uint64]*graphdb.DirectedChannel) error {

                // Increment the total number of nodes with each iteration.
                numNodes++

                // For each channel we'll compute the out degree of each node,
                // and also update our running tallies of the min/max channel
                // capacity, as well as the total channel capacity. We pass
                // through the db transaction from the outer view so we can
                // re-use it within this inner view.
                var outDegree uint32
                for _, edge := range edges {
                        // Bump up the out degree for this node for each
                        // channel encountered.
                        outDegree++

                        // If we've already seen this channel, then we'll
                        // return early to ensure that we don't double-count
                        // stats.
                        if _, ok := seenChans[edge.ChannelID]; ok {
                                return nil
                        }

                        // Compare the capacity of this channel against the
                        // running min/max to see if we should update the
                        // extrema.
                        chanCapacity := edge.Capacity
                        if chanCapacity < minChannelSize {
                                minChannelSize = chanCapacity
                        }
                        if chanCapacity > maxChannelSize {
                                maxChannelSize = chanCapacity
                        }

                        // Accumulate the total capacity of this channel to the
                        // network-wide capacity.
                        totalNetworkCapacity += chanCapacity

                        numChannels++

                        seenChans[edge.ChannelID] = struct{}{}
                        allChans = append(allChans, edge.Capacity)
                }

                // Finally, if the out degree of this node is greater than what
                // we've seen so far, update the maxChanOut variable.
                if outDegree > maxChanOut {
                        maxChanOut = outDegree
                }

                return nil
        })
        if err != nil {
                return nil, err
        }

        // Query the graph for the current number of zombie channels.
        numZombies, err := graph.NumZombies()
        if err != nil {
                return nil, err
        }

        // Find the median.
        medianChanSize = autopilot.Median(allChans)

        // If we don't have any channels, then reset the minChannelSize to zero
        // to avoid outputting NaN in encoded JSON.
        if numChannels == 0 {
                minChannelSize = 0
        }

        // Graph diameter.
        channelGraph := autopilot.ChannelGraphFromCachedDatabase(graph)
        simpleGraph, err := autopilot.NewSimpleGraph(channelGraph)
        if err != nil {
                return nil, err
        }
        start := time.Now()
        diameter := simpleGraph.DiameterRadialCutoff()
        rpcsLog.Infof("elapsed time for diameter (%d) calculation: %v", diameter,
                time.Since(start))

        // TODO(roasbeef): also add oldest channel?
        netInfo := &lnrpc.NetworkInfo{
                GraphDiameter:        diameter,
                MaxOutDegree:         maxChanOut,
                AvgOutDegree:         float64(2*numChannels) / float64(numNodes),
                NumNodes:             numNodes,
                NumChannels:          numChannels,
                TotalNetworkCapacity: int64(totalNetworkCapacity),
                AvgChannelSize:       float64(totalNetworkCapacity) / float64(numChannels),

                MinChannelSize:       int64(minChannelSize),
                MaxChannelSize:       int64(maxChannelSize),
                MedianChannelSizeSat: int64(medianChanSize),
                NumZombieChans:       numZombies,
        }

        // Similarly, if we don't have any channels, then we'll also set the
        // average channel size to zero in order to avoid weird JSON encoding
        // outputs.
        if numChannels == 0 {
                netInfo.AvgChannelSize = 0
        }

        return netInfo, nil
}

// StopDaemon will send a shutdown request to the interrupt handler, triggering
7026
// a graceful shutdown of the daemon.
7027
func (r *rpcServer) StopDaemon(_ context.Context,
7028
        _ *lnrpc.StopRequest) (*lnrpc.StopResponse, error) {
3✔
7029

3✔
7030
        // Before we even consider a shutdown, are we currently in recovery
3✔
7031
        // mode? We don't want to allow shutting down during recovery because
3✔
7032
        // that would mean the user would have to manually continue the rescan
3✔
7033
        // process next time by using `lncli unlock --recovery_window X`,
3✔
7034
        // otherwise some funds wouldn't be picked up.
3✔
7035
        isRecoveryMode, progress, err := r.server.cc.Wallet.GetRecoveryInfo()
3✔
7036
        if err != nil {
3✔
7037
                return nil, fmt.Errorf("unable to get wallet recovery info: %w",
×
7038
                        err)
×
7039
        }
×
7040
        if isRecoveryMode && progress < 1 {
3✔
7041
                return nil, fmt.Errorf("wallet recovery in progress, cannot " +
×
7042
                        "shut down, please wait until rescan finishes")
×
7043
        }
×
7044

7045
        r.interceptor.RequestShutdown()
3✔
7046

3✔
7047
        return &lnrpc.StopResponse{
3✔
7048
                Status: "shutdown initiated, check logs for progress",
3✔
7049
        }, nil
3✔
7050
}
7051

7052
// SubscribeChannelGraph launches a streaming RPC that allows the caller to
7053
// receive notifications upon any changes to the channel graph topology from the
7054
// point of view of the responding node. Events notified include: new nodes coming
7055
// online, nodes updating their authenticated attributes, new channels being
7056
// advertised, updates in the routing policy for a directional channel edge,
7057
// and finally when prior channels are closed on-chain.
7058
func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription,
7059
        updateStream lnrpc.Lightning_SubscribeChannelGraphServer) error {
3✔
7060

3✔
7061
        // First, we start by subscribing to a new intent to receive
3✔
7062
        // notifications from the channel router.
3✔
7063
        client, err := r.server.graphBuilder.SubscribeTopology()
3✔
7064
        if err != nil {
3✔
7065
                return err
×
7066
        }
×
7067

7068
        // Ensure that the resources for the topology update client are cleaned
7069
        // up once either the server or the client exits.
7070
        defer client.Cancel()
3✔
7071

3✔
7072
        for {
6✔
7073
                select {
3✔
7074

7075
                // A new update has been sent by the channel router, we'll
7076
                // marshal it into the form expected by the gRPC client, then
7077
                // send it off.
7078
                case topChange, ok := <-client.TopologyChanges:
3✔
7079
                        // If the second value from the channel read is false,
3✔
7080
                        // then this means that the channel router is exiting
3✔
7081
                        // or the notification client was canceled. So we'll
3✔
7082
                        // exit early.
3✔
7083
                        if !ok {
3✔
7084
                                return errors.New("server shutting down")
×
7085
                        }
×
7086

7087
                        // Convert the struct from the channel router into the
7088
                        // form expected by the gRPC service then send it off
7089
                        // to the client.
7090
                        graphUpdate := marshallTopologyChange(topChange)
3✔
7091
                        if err := updateStream.Send(graphUpdate); err != nil {
3✔
7092
                                return err
×
7093
                        }
×
7094

7095
                // The response stream's context for whatever reason has been
7096
                // closed. If context is closed by an exceeded deadline
7097
                // we will return an error.
7098
                case <-updateStream.Context().Done():
3✔
7099
                        if errors.Is(updateStream.Context().Err(), context.Canceled) {
6✔
7100
                                return nil
3✔
7101
                        }
3✔
7102
                        return updateStream.Context().Err()
×
7103

7104
                // The server is quitting, so we'll exit immediately. Returning
7105
                // nil will close the client's read end of the stream.
7106
                case <-r.quit:
×
7107
                        return nil
×
7108
                }
7109
        }
7110
}
7111

7112
// marshallTopologyChange performs a mapping from the topology change struct
7113
// returned by the router to the form of notifications expected by the current
7114
// gRPC service.
7115
func marshallTopologyChange(
7116
        topChange *graph.TopologyChange) *lnrpc.GraphTopologyUpdate {
3✔
7117

3✔
7118
        // encodeKey is a simple helper function that converts a live public
3✔
7119
        // key into a hex-encoded version of the compressed serialization for
3✔
7120
        // the public key.
3✔
7121
        encodeKey := func(k *btcec.PublicKey) string {
6✔
7122
                return hex.EncodeToString(k.SerializeCompressed())
3✔
7123
        }
3✔
7124

7125
        nodeUpdates := make([]*lnrpc.NodeUpdate, len(topChange.NodeUpdates))
3✔
7126
        for i, nodeUpdate := range topChange.NodeUpdates {
6✔
7127
                nodeAddrs := make(
3✔
7128
                        []*lnrpc.NodeAddress, 0, len(nodeUpdate.Addresses),
3✔
7129
                )
3✔
7130
                for _, addr := range nodeUpdate.Addresses {
6✔
7131
                        nodeAddr := &lnrpc.NodeAddress{
3✔
7132
                                Network: addr.Network(),
3✔
7133
                                Addr:    addr.String(),
3✔
7134
                        }
3✔
7135
                        nodeAddrs = append(nodeAddrs, nodeAddr)
3✔
7136
                }
3✔
7137

7138
                addrs := make([]string, len(nodeUpdate.Addresses))
3✔
7139
                for i, addr := range nodeUpdate.Addresses {
6✔
7140
                        addrs[i] = addr.String()
3✔
7141
                }
3✔
7142

7143
                nodeUpdates[i] = &lnrpc.NodeUpdate{
3✔
7144
                        Addresses:     addrs,
3✔
7145
                        NodeAddresses: nodeAddrs,
3✔
7146
                        IdentityKey:   encodeKey(nodeUpdate.IdentityKey),
3✔
7147
                        Alias:         nodeUpdate.Alias,
3✔
7148
                        Color:         nodeUpdate.Color,
3✔
7149
                        Features: invoicesrpc.CreateRPCFeatures(
3✔
7150
                                nodeUpdate.Features,
3✔
7151
                        ),
3✔
7152
                }
3✔
7153
        }
7154

7155
        channelUpdates := make([]*lnrpc.ChannelEdgeUpdate, len(topChange.ChannelEdgeUpdates))
3✔
7156
        for i, channelUpdate := range topChange.ChannelEdgeUpdates {
6✔
7157

3✔
7158
                customRecords := marshalExtraOpaqueData(
3✔
7159
                        channelUpdate.ExtraOpaqueData,
3✔
7160
                )
3✔
7161
                inboundFee := extractInboundFeeSafe(
3✔
7162
                        channelUpdate.ExtraOpaqueData,
3✔
7163
                )
3✔
7164

3✔
7165
                channelUpdates[i] = &lnrpc.ChannelEdgeUpdate{
3✔
7166
                        ChanId: channelUpdate.ChanID,
3✔
7167
                        ChanPoint: &lnrpc.ChannelPoint{
3✔
7168
                                FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
3✔
7169
                                        FundingTxidBytes: channelUpdate.ChanPoint.Hash[:],
3✔
7170
                                },
3✔
7171
                                OutputIndex: channelUpdate.ChanPoint.Index,
3✔
7172
                        },
3✔
7173
                        Capacity: int64(channelUpdate.Capacity),
3✔
7174
                        RoutingPolicy: &lnrpc.RoutingPolicy{
3✔
7175
                                TimeLockDelta: uint32(
3✔
7176
                                        channelUpdate.TimeLockDelta,
3✔
7177
                                ),
3✔
7178
                                MinHtlc: int64(
3✔
7179
                                        channelUpdate.MinHTLC,
3✔
7180
                                ),
3✔
7181
                                MaxHtlcMsat: uint64(
3✔
7182
                                        channelUpdate.MaxHTLC,
3✔
7183
                                ),
3✔
7184
                                FeeBaseMsat: int64(
3✔
7185
                                        channelUpdate.BaseFee,
3✔
7186
                                ),
3✔
7187
                                FeeRateMilliMsat: int64(
3✔
7188
                                        channelUpdate.FeeRate,
3✔
7189
                                ),
3✔
7190
                                Disabled:                channelUpdate.Disabled,
3✔
7191
                                InboundFeeBaseMsat:      inboundFee.BaseFee,
3✔
7192
                                InboundFeeRateMilliMsat: inboundFee.FeeRate,
3✔
7193
                                CustomRecords:           customRecords,
3✔
7194
                        },
3✔
7195
                        AdvertisingNode: encodeKey(channelUpdate.AdvertisingNode),
3✔
7196
                        ConnectingNode:  encodeKey(channelUpdate.ConnectingNode),
3✔
7197
                }
3✔
7198
        }
3✔
7199

7200
        closedChans := make([]*lnrpc.ClosedChannelUpdate, len(topChange.ClosedChannels))
3✔
7201
        for i, closedChan := range topChange.ClosedChannels {
6✔
7202
                closedChans[i] = &lnrpc.ClosedChannelUpdate{
3✔
7203
                        ChanId:       closedChan.ChanID,
3✔
7204
                        Capacity:     int64(closedChan.Capacity),
3✔
7205
                        ClosedHeight: closedChan.ClosedHeight,
3✔
7206
                        ChanPoint: &lnrpc.ChannelPoint{
3✔
7207
                                FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
3✔
7208
                                        FundingTxidBytes: closedChan.ChanPoint.Hash[:],
3✔
7209
                                },
3✔
7210
                                OutputIndex: closedChan.ChanPoint.Index,
3✔
7211
                        },
3✔
7212
                }
3✔
7213
        }
3✔
7214

7215
        return &lnrpc.GraphTopologyUpdate{
3✔
7216
                NodeUpdates:    nodeUpdates,
3✔
7217
                ChannelUpdates: channelUpdates,
3✔
7218
                ClosedChans:    closedChans,
3✔
7219
        }
3✔
7220
}
7221

7222
// ListPayments returns a list of outgoing payments determined by a paginated
7223
// database query.
7224
func (r *rpcServer) ListPayments(ctx context.Context,
7225
        req *lnrpc.ListPaymentsRequest) (*lnrpc.ListPaymentsResponse, error) {
3✔
7226

3✔
7227
        // If both dates are set, we check that the start date is less than the
3✔
7228
        // end date, otherwise we'll get an empty result.
3✔
7229
        if req.CreationDateStart != 0 && req.CreationDateEnd != 0 {
3✔
7230
                if req.CreationDateStart >= req.CreationDateEnd {
×
7231
                        return nil, fmt.Errorf("start date(%v) must be before "+
×
7232
                                "end date(%v)", req.CreationDateStart,
×
7233
                                req.CreationDateEnd)
×
7234
                }
×
7235
        }
7236

7237
        query := channeldb.PaymentsQuery{
3✔
7238
                IndexOffset:       req.IndexOffset,
3✔
7239
                MaxPayments:       req.MaxPayments,
3✔
7240
                Reversed:          req.Reversed,
3✔
7241
                IncludeIncomplete: req.IncludeIncomplete,
3✔
7242
                CountTotal:        req.CountTotalPayments,
3✔
7243
                CreationDateStart: int64(req.CreationDateStart),
3✔
7244
                CreationDateEnd:   int64(req.CreationDateEnd),
3✔
7245
        }
3✔
7246

3✔
7247
        // If the maximum number of payments wasn't specified, then we'll
3✔
7248
        // default to return the maximal number of payments representable.
3✔
7249
        if req.MaxPayments == 0 {
6✔
7250
                query.MaxPayments = math.MaxUint64
3✔
7251
        }
3✔
7252

7253
        paymentsQuerySlice, err := r.server.miscDB.QueryPayments(query)
3✔
7254
        if err != nil {
3✔
7255
                return nil, err
×
7256
        }
×
7257

7258
        paymentsResp := &lnrpc.ListPaymentsResponse{
3✔
7259
                LastIndexOffset:  paymentsQuerySlice.LastIndexOffset,
3✔
7260
                FirstIndexOffset: paymentsQuerySlice.FirstIndexOffset,
3✔
7261
                TotalNumPayments: paymentsQuerySlice.TotalCount,
3✔
7262
        }
3✔
7263

3✔
7264
        for _, payment := range paymentsQuerySlice.Payments {
6✔
7265
                payment := payment
3✔
7266

3✔
7267
                rpcPayment, err := r.routerBackend.MarshallPayment(payment)
3✔
7268
                if err != nil {
3✔
7269
                        return nil, err
×
7270
                }
×
7271

7272
                paymentsResp.Payments = append(
3✔
7273
                        paymentsResp.Payments, rpcPayment,
3✔
7274
                )
3✔
7275
        }
7276

7277
        return paymentsResp, nil
3✔
7278
}
7279

7280
// DeletePayment deletes a payment from the DB given its payment hash. If
7281
// failedHtlcsOnly is set, only failed HTLC attempts of the payment will be
7282
// deleted.
7283
func (r *rpcServer) DeletePayment(ctx context.Context,
7284
        req *lnrpc.DeletePaymentRequest) (
7285
        *lnrpc.DeletePaymentResponse, error) {
×
7286

×
7287
        hash, err := lntypes.MakeHash(req.PaymentHash)
×
7288
        if err != nil {
×
7289
                return nil, err
×
7290
        }
×
7291

7292
        rpcsLog.Infof("[DeletePayment] payment_identifier=%v, "+
×
7293
                "failed_htlcs_only=%v", hash, req.FailedHtlcsOnly)
×
7294

×
7295
        err = r.server.miscDB.DeletePayment(hash, req.FailedHtlcsOnly)
×
7296
        if err != nil {
×
7297
                return nil, err
×
7298
        }
×
7299

7300
        return &lnrpc.DeletePaymentResponse{
×
7301
                Status: "payment deleted",
×
7302
        }, nil
×
7303
}
7304

7305
// DeleteAllPayments deletes all outgoing payments from DB.
7306
func (r *rpcServer) DeleteAllPayments(ctx context.Context,
7307
        req *lnrpc.DeleteAllPaymentsRequest) (
7308
        *lnrpc.DeleteAllPaymentsResponse, error) {
3✔
7309

3✔
7310
        switch {
3✔
7311
        // Since this is a destructive operation, at least one of the options
7312
        // must be set to true.
7313
        case !req.AllPayments && !req.FailedPaymentsOnly &&
7314
                !req.FailedHtlcsOnly:
×
7315

×
7316
                return nil, fmt.Errorf("at least one of the options " +
×
7317
                        "`all_payments`, `failed_payments_only`, or " +
×
7318
                        "`failed_htlcs_only` must be set to true")
×
7319

7320
        // `all_payments` cannot be true with `failed_payments_only` or
7321
        // `failed_htlcs_only`. `all_payments` includes all records, making
7322
        // these options contradictory.
7323
        case req.AllPayments &&
7324
                (req.FailedPaymentsOnly || req.FailedHtlcsOnly):
×
7325

×
7326
                return nil, fmt.Errorf("`all_payments` cannot be set to true " +
×
7327
                        "while either `failed_payments_only` or " +
×
7328
                        "`failed_htlcs_only` is also set to true")
×
7329
        }
7330

7331
        rpcsLog.Infof("[DeleteAllPayments] failed_payments_only=%v, "+
3✔
7332
                "failed_htlcs_only=%v", req.FailedPaymentsOnly,
3✔
7333
                req.FailedHtlcsOnly)
3✔
7334

3✔
7335
        numDeletedPayments, err := r.server.miscDB.DeletePayments(
3✔
7336
                req.FailedPaymentsOnly, req.FailedHtlcsOnly,
3✔
7337
        )
3✔
7338
        if err != nil {
3✔
7339
                return nil, err
×
7340
        }
×
7341

7342
        return &lnrpc.DeleteAllPaymentsResponse{
3✔
7343
                Status: fmt.Sprintf("%v payments deleted, failed_htlcs_only=%v",
3✔
7344
                        numDeletedPayments, req.FailedHtlcsOnly),
3✔
7345
        }, nil
3✔
7346
}
7347

7348
// DebugLevel allows a caller to programmatically set the logging verbosity of
7349
// lnd. The logging can be targeted according to a coarse daemon-wide logging
7350
// level, or in a granular fashion to specify the logging for a target
7351
// sub-system.
7352
func (r *rpcServer) DebugLevel(ctx context.Context,
7353
        req *lnrpc.DebugLevelRequest) (*lnrpc.DebugLevelResponse, error) {
×
7354

×
7355
        // If show is set, then we simply print out the list of available
×
7356
        // sub-systems.
×
7357
        if req.Show {
×
7358
                return &lnrpc.DebugLevelResponse{
×
7359
                        SubSystems: strings.Join(
×
7360
                                r.cfg.SubLogMgr.SupportedSubsystems(), " ",
×
7361
                        ),
×
7362
                }, nil
×
7363
        }
×
7364

7365
        rpcsLog.Infof("[debuglevel] changing debug level to: %v", req.LevelSpec)
×
7366

×
7367
        // Otherwise, we'll attempt to set the logging level using the
×
7368
        // specified level spec.
×
7369
        err := build.ParseAndSetDebugLevels(req.LevelSpec, r.cfg.SubLogMgr)
×
7370
        if err != nil {
×
7371
                return nil, err
×
7372
        }
×
7373

7374
        subLoggers := r.cfg.SubLogMgr.SubLoggers()
×
7375
        // Sort alphabetically by subsystem name.
×
7376
        var tags []string
×
7377
        for t := range subLoggers {
×
7378
                tags = append(tags, t)
×
7379
        }
×
7380
        sort.Strings(tags)
×
7381

×
7382
        // Create the log levels string.
×
7383
        var logLevels []string
×
7384
        for _, t := range tags {
×
7385
                logLevels = append(logLevels, fmt.Sprintf("%s=%s", t,
×
7386
                        subLoggers[t].Level().String()))
×
7387
        }
×
7388
        logLevelsString := strings.Join(logLevels, ", ")
×
7389

×
7390
        // Propagate the new config level to the main config struct.
×
7391
        r.cfg.DebugLevel = logLevelsString
×
7392

×
7393
        return &lnrpc.DebugLevelResponse{
×
7394
                SubSystems: logLevelsString,
×
7395
        }, nil
×
7396
}
7397

7398
// DecodePayReq takes an encoded payment request string and attempts to decode
7399
// it, returning a full description of the conditions encoded within the
7400
// payment request.
7401
func (r *rpcServer) DecodePayReq(ctx context.Context,
7402
        req *lnrpc.PayReqString) (*lnrpc.PayReq, error) {
3✔
7403

3✔
7404
        rpcsLog.Tracef("[decodepayreq] decoding: %v", req.PayReq)
3✔
7405

3✔
7406
        // First we'll attempt to decode the payment request string. If the
3✔
7407
        // request is invalid or the checksum doesn't match, then we'll exit
3✔
7408
        // here with an error.
3✔
7409
        payReq, err := zpay32.Decode(req.PayReq, r.cfg.ActiveNetParams.Params)
3✔
7410
        if err != nil {
3✔
7411
                return nil, err
×
7412
        }
×
7413

7414
        // Let the fields default to empty strings.
7415
        desc := ""
3✔
7416
        if payReq.Description != nil {
6✔
7417
                desc = *payReq.Description
3✔
7418
        }
3✔
7419

7420
        descHash := []byte("")
3✔
7421
        if payReq.DescriptionHash != nil {
3✔
7422
                descHash = payReq.DescriptionHash[:]
×
7423
        }
×
7424

7425
        fallbackAddr := ""
3✔
7426
        if payReq.FallbackAddr != nil {
3✔
7427
                fallbackAddr = payReq.FallbackAddr.String()
×
7428
        }
×
7429

7430
        // Expiry time will default to 3600 seconds if not specified
7431
        // explicitly.
7432
        expiry := int64(payReq.Expiry().Seconds())
3✔
7433

3✔
7434
        // Convert between the `lnrpc` and `routing` types.
3✔
7435
        routeHints := invoicesrpc.CreateRPCRouteHints(payReq.RouteHints)
3✔
7436

3✔
7437
        blindedPaymentPaths, err := invoicesrpc.CreateRPCBlindedPayments(
3✔
7438
                payReq.BlindedPaymentPaths,
3✔
7439
        )
3✔
7440
        if err != nil {
3✔
7441
                return nil, err
×
7442
        }
×
7443

7444
        var amtSat, amtMsat int64
3✔
7445
        if payReq.MilliSat != nil {
6✔
7446
                amtSat = int64(payReq.MilliSat.ToSatoshis())
3✔
7447
                amtMsat = int64(*payReq.MilliSat)
3✔
7448
        }
3✔
7449

7450
        // Extract the payment address from the payment request, if present.
7451
        paymentAddr := payReq.PaymentAddr.UnwrapOr([32]byte{})
3✔
7452

3✔
7453
        dest := payReq.Destination.SerializeCompressed()
3✔
7454
        return &lnrpc.PayReq{
3✔
7455
                Destination:     hex.EncodeToString(dest),
3✔
7456
                PaymentHash:     hex.EncodeToString(payReq.PaymentHash[:]),
3✔
7457
                NumSatoshis:     amtSat,
3✔
7458
                NumMsat:         amtMsat,
3✔
7459
                Timestamp:       payReq.Timestamp.Unix(),
3✔
7460
                Description:     desc,
3✔
7461
                DescriptionHash: hex.EncodeToString(descHash[:]),
3✔
7462
                FallbackAddr:    fallbackAddr,
3✔
7463
                Expiry:          expiry,
3✔
7464
                CltvExpiry:      int64(payReq.MinFinalCLTVExpiry()),
3✔
7465
                RouteHints:      routeHints,
3✔
7466
                BlindedPaths:    blindedPaymentPaths,
3✔
7467
                PaymentAddr:     paymentAddr[:],
3✔
7468
                Features:        invoicesrpc.CreateRPCFeatures(payReq.Features),
3✔
7469
        }, nil
3✔
7470
}
7471

7472
// feeBase is the fixed point that fee rate computations are performed over.
7473
// Nodes on the network advertise their fee rate using this point as a base.
7474
// This means that the minimal possible fee rate is 1e-6, or 0.000001, or
7475
// 0.0001%.
7476
const feeBase float64 = 1000000
7477

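// Editorial illustration (not part of rpcserver.go): a minimal sketch of the
// conversion between the protocol's fixed-point fee rate (parts per million,
// scaled by feeBase) and the floating-point rate exposed over RPC. It mirrors
// the arithmetic used by FeeReport and UpdateChannelPolicy below; the helper
// names are made up for this example.
func feeRateToFixedPoint(feeRate float64) uint32 {
        // Scale by feeBase and round to compensate for IEEE 754 imprecision;
        // e.g. 0.000001*1e6 may not land exactly on 1 without rounding.
        return uint32(math.Round(feeRate * feeBase))
}

func fixedPointToFeeRate(ppm uint32) float64 {
        // A policy of 500 ppm corresponds to 500/1e6 = 0.0005, i.e. 0.05%.
        return float64(ppm) / feeBase
}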
7478
// FeeReport allows the caller to obtain a report detailing the current fee
7479
// schedule enforced by the node globally for each channel.
7480
func (r *rpcServer) FeeReport(ctx context.Context,
7481
        _ *lnrpc.FeeReportRequest) (*lnrpc.FeeReportResponse, error) {
3✔
7482

3✔
7483
        channelGraph := r.server.graphDB
3✔
7484
        selfNode, err := channelGraph.SourceNode()
3✔
7485
        if err != nil {
3✔
7486
                return nil, err
×
7487
        }
×
7488

7489
        var feeReports []*lnrpc.ChannelFeeReport
3✔
7490
        err = channelGraph.ForEachNodeChannel(selfNode.PubKeyBytes,
3✔
7491
                func(_ kvdb.RTx, chanInfo *models.ChannelEdgeInfo,
3✔
7492
                        edgePolicy, _ *models.ChannelEdgePolicy) error {
6✔
7493

3✔
7494
                        // Self node should always have policies for its
3✔
7495
                        // channels.
3✔
7496
                        if edgePolicy == nil {
3✔
7497
                                return fmt.Errorf("no policy for outgoing "+
×
7498
                                        "channel %v ", chanInfo.ChannelID)
×
7499
                        }
×
7500

7501
                        // We'll compute the effective fee rate by converting
7502
                        // from a fixed point fee rate to a floating point fee
7503
                        // rate. The fee rate field in the database is the amount
7504
                        // of mSAT charged per 1mil mSAT sent, so we'll divide by
7505
                        // this to get the proper fee rate.
7506
                        feeRateFixedPoint :=
3✔
7507
                                edgePolicy.FeeProportionalMillionths
3✔
7508
                        feeRate := float64(feeRateFixedPoint) / feeBase
3✔
7509

3✔
7510
                        // Decode inbound fee from extra data.
3✔
7511
                        var inboundFee lnwire.Fee
3✔
7512
                        _, err := edgePolicy.ExtraOpaqueData.ExtractRecords(
3✔
7513
                                &inboundFee,
3✔
7514
                        )
3✔
7515
                        if err != nil {
3✔
7516
                                return err
×
7517
                        }
×
7518

7519
                        // TODO(roasbeef): also add stats for revenue for each
7520
                        // channel
7521
                        feeReports = append(feeReports, &lnrpc.ChannelFeeReport{
3✔
7522
                                ChanId:       chanInfo.ChannelID,
3✔
7523
                                ChannelPoint: chanInfo.ChannelPoint.String(),
3✔
7524
                                BaseFeeMsat:  int64(edgePolicy.FeeBaseMSat),
3✔
7525
                                FeePerMil:    int64(feeRateFixedPoint),
3✔
7526
                                FeeRate:      feeRate,
3✔
7527

3✔
7528
                                InboundBaseFeeMsat: inboundFee.BaseFee,
3✔
7529
                                InboundFeePerMil:   inboundFee.FeeRate,
3✔
7530
                        })
3✔
7531

3✔
7532
                        return nil
3✔
7533
                },
7534
        )
7535
        if err != nil {
3✔
7536
                return nil, err
×
7537
        }
×
7538

7539
        fwdEventLog := r.server.miscDB.ForwardingLog()
3✔
7540

3✔
7541
        // computeFeeSum is a helper function that computes the total fees for
3✔
7542
        // a particular time slice described by a forwarding event query.
3✔
7543
        computeFeeSum := func(query channeldb.ForwardingEventQuery) (lnwire.MilliSatoshi, error) {
6✔
7544

3✔
7545
                var totalFees lnwire.MilliSatoshi
3✔
7546

3✔
7547
                // We'll continue to fetch the next query and accumulate the
3✔
7548
                // fees until the next query returns no events.
3✔
7549
                for {
6✔
7550
                        timeSlice, err := fwdEventLog.Query(query)
3✔
7551
                        if err != nil {
3✔
7552
                                return 0, err
×
7553
                        }
×
7554

7555
                        // If the timeslice is empty, then we'll return as
7556
                        // we've retrieved all the entries in this range.
7557
                        if len(timeSlice.ForwardingEvents) == 0 {
6✔
7558
                                break
3✔
7559
                        }
7560

7561
                        // Otherwise, we'll tally up and accumulate the total
7562
                        // fees for this time slice.
7563
                        for _, event := range timeSlice.ForwardingEvents {
6✔
7564
                                fee := event.AmtIn - event.AmtOut
3✔
7565
                                totalFees += fee
3✔
7566
                        }
3✔
7567

7568
                        // We'll now take the last offset index returned as
7569
                        // part of this response, and modify our query to start
7570
                        // at this index. This has a pagination effect in the
7571
                        // case that our query bounds have more than 100k
7572
                        // entries.
7573
                        query.IndexOffset = timeSlice.LastIndexOffset
3✔
7574
                }
7575

7576
                return totalFees, nil
3✔
7577
        }
7578

7579
        now := time.Now()
3✔
7580

3✔
7581
        // Before we perform the queries below, we'll instruct the switch to
3✔
7582
        // flush any pending events to disk. This ensures we get a complete
3✔
7583
        // snapshot at this particular time.
3✔
7584
        if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil {
3✔
7585
                return nil, fmt.Errorf("unable to flush forwarding "+
×
7586
                        "events: %v", err)
×
7587
        }
×
7588

7589
        // In addition to returning the current fee schedule for each channel,
7590
        // we'll also perform a series of queries to obtain the total fees
7591
        // earned over the past day, week, and month.
7592
        dayQuery := channeldb.ForwardingEventQuery{
3✔
7593
                StartTime:    now.Add(-time.Hour * 24),
3✔
7594
                EndTime:      now,
3✔
7595
                NumMaxEvents: 1000,
3✔
7596
        }
3✔
7597
        dayFees, err := computeFeeSum(dayQuery)
3✔
7598
        if err != nil {
3✔
7599
                return nil, fmt.Errorf("unable to retrieve day fees: %w", err)
×
7600
        }
×
7601

7602
        weekQuery := channeldb.ForwardingEventQuery{
3✔
7603
                StartTime:    now.Add(-time.Hour * 24 * 7),
3✔
7604
                EndTime:      now,
3✔
7605
                NumMaxEvents: 1000,
3✔
7606
        }
3✔
7607
        weekFees, err := computeFeeSum(weekQuery)
3✔
7608
        if err != nil {
3✔
7609
                return nil, fmt.Errorf("unable to retrieve week fees: %w", err)
×
7610
        }
×
7611

7612
        monthQuery := channeldb.ForwardingEventQuery{
3✔
7613
                StartTime:    now.Add(-time.Hour * 24 * 30),
3✔
7614
                EndTime:      now,
3✔
7615
                NumMaxEvents: 1000,
3✔
7616
        }
3✔
7617
        monthFees, err := computeFeeSum(monthQuery)
3✔
7618
        if err != nil {
3✔
7619
                return nil, fmt.Errorf("unable to retrieve month fees: %w", err)
×
7620
        }
×
7621

7622
        return &lnrpc.FeeReportResponse{
3✔
7623
                ChannelFees: feeReports,
3✔
7624
                DayFeeSum:   uint64(dayFees.ToSatoshis()),
3✔
7625
                WeekFeeSum:  uint64(weekFees.ToSatoshis()),
3✔
7626
                MonthFeeSum: uint64(monthFees.ToSatoshis()),
3✔
7627
        }, nil
3✔
7628
}
7629

7630
// minFeeRate is the smallest permitted fee rate within the network. This is
7631
// derived by the fact that fee rates are computed using a fixed point of
7632
// 1,000,000. As a result, the smallest representable fee rate is 1e-6, or
7633
// 0.000001, or 0.0001%.
7634
const minFeeRate = 1e-6
7635

7636
// UpdateChannelPolicy allows the caller to update the channel forwarding policy
7637
// for all channels globally, or a particular channel.
7638
func (r *rpcServer) UpdateChannelPolicy(ctx context.Context,
7639
        req *lnrpc.PolicyUpdateRequest) (*lnrpc.PolicyUpdateResponse, error) {
3✔
7640

3✔
7641
        var targetChans []wire.OutPoint
3✔
7642
        switch scope := req.Scope.(type) {
3✔
7643
        // If the request is targeting all active channels, then we don't need
7644
        // target any channels by their channel point.
7645
        case *lnrpc.PolicyUpdateRequest_Global:
3✔
7646

7647
        // Otherwise, we're targeting an individual channel by its channel
7648
        // point.
7649
        case *lnrpc.PolicyUpdateRequest_ChanPoint:
3✔
7650
                txid, err := lnrpc.GetChanPointFundingTxid(scope.ChanPoint)
3✔
7651
                if err != nil {
3✔
7652
                        return nil, err
×
7653
                }
×
7654
                targetChans = append(targetChans, wire.OutPoint{
3✔
7655
                        Hash:  *txid,
3✔
7656
                        Index: scope.ChanPoint.OutputIndex,
3✔
7657
                })
3✔
7658
        default:
×
7659
                return nil, fmt.Errorf("unknown scope: %v", scope)
×
7660
        }
7661

7662
        var feeRateFixed uint32
3✔
7663

3✔
7664
        switch {
3✔
7665
        // The request should use either the fee rate in percent, or the new
7666
        // ppm rate, but not both.
7667
        case req.FeeRate != 0 && req.FeeRatePpm != 0:
×
7668
                errMsg := "cannot set both FeeRate and FeeRatePpm at the " +
×
7669
                        "same time"
×
7670

×
7671
                return nil, status.Errorf(codes.InvalidArgument, errMsg)
×
7672

7673
        // If the request is using fee_rate.
7674
        case req.FeeRate != 0:
3✔
7675
                // As a sanity check, if the fee isn't zero, we'll ensure that
3✔
7676
                // the passed fee rate is not below 1e-6, the lowest allowed
3✔
7677
                // non-zero fee rate expressible within the protocol.
3✔
7678
                if req.FeeRate != 0 && req.FeeRate < minFeeRate {
3✔
7679
                        return nil, fmt.Errorf("fee rate of %v is too "+
×
7680
                                "small, min fee rate is %v", req.FeeRate,
×
7681
                                minFeeRate)
×
7682
                }
×
7683

7684
                // We'll also need to convert the floating point fee rate we
7685
                // accept over RPC to the fixed point rate that we use within
7686
                // the protocol. We do this by multiplying the passed fee rate
7687
                // by the fee base. This gives us the fixed point, scaled by 1
7688
                // million that's used within the protocol.
7689
                //
7690
                // Because of the inaccurate precision of the IEEE 754
7691
                // standard, we need to round the product of feerate and
7692
                // feebase.
7693
                feeRateFixed = uint32(math.Round(req.FeeRate * feeBase))
3✔
7694

7695
        // Otherwise, we use the fee_rate_ppm parameter.
7696
        case req.FeeRatePpm != 0:
3✔
7697
                feeRateFixed = req.FeeRatePpm
3✔
7698
        }
7699

7700
        // We'll also ensure that the user isn't setting a CLTV delta that
7701
        // won't give outgoing HTLCs enough time to fully resolve if needed.
7702
        if req.TimeLockDelta < minTimeLockDelta {
3✔
7703
                return nil, fmt.Errorf("time lock delta of %v is too small, "+
×
7704
                        "minimum supported is %v", req.TimeLockDelta,
×
7705
                        minTimeLockDelta)
×
7706
        } else if req.TimeLockDelta > uint32(MaxTimeLockDelta) {
3✔
7707
                return nil, fmt.Errorf("time lock delta of %v is too big, "+
×
7708
                        "maximum supported is %v", req.TimeLockDelta,
×
7709
                        MaxTimeLockDelta)
×
7710
        }
×
7711

7712
        // By default, positive inbound fees are rejected.
7713
        if !r.cfg.AcceptPositiveInboundFees && req.InboundFee != nil {
6✔
7714
                if req.InboundFee.BaseFeeMsat > 0 {
3✔
7715
                        return nil, fmt.Errorf("positive values for inbound "+
×
7716
                                "base fee msat are not supported: %v",
×
7717
                                req.InboundFee.BaseFeeMsat)
×
7718
                }
×
7719
                if req.InboundFee.FeeRatePpm > 0 {
3✔
7720
                        return nil, fmt.Errorf("positive values for inbound "+
×
7721
                                "fee rate ppm are not supported: %v",
×
7722
                                req.InboundFee.FeeRatePpm)
×
7723
                }
×
7724
        }
7725

7726
        // If no inbound fees have been specified, we indicate with an empty
7727
        // option that the previous inbound fee should be retained during the
7728
        // edge update.
7729
        inboundFee := fn.None[models.InboundFee]()
3✔
7730
        if req.InboundFee != nil {
6✔
7731
                inboundFee = fn.Some(models.InboundFee{
3✔
7732
                        Base: req.InboundFee.BaseFeeMsat,
3✔
7733
                        Rate: req.InboundFee.FeeRatePpm,
3✔
7734
                })
3✔
7735
        }
3✔
7736

7737
        baseFeeMsat := lnwire.MilliSatoshi(req.BaseFeeMsat)
3✔
7738
        feeSchema := routing.FeeSchema{
3✔
7739
                BaseFee:    baseFeeMsat,
3✔
7740
                FeeRate:    feeRateFixed,
3✔
7741
                InboundFee: inboundFee,
3✔
7742
        }
3✔
7743

3✔
7744
        maxHtlc := lnwire.MilliSatoshi(req.MaxHtlcMsat)
3✔
7745
        var minHtlc *lnwire.MilliSatoshi
3✔
7746
        if req.MinHtlcMsatSpecified {
3✔
7747
                min := lnwire.MilliSatoshi(req.MinHtlcMsat)
×
7748
                minHtlc = &min
×
7749
        }
×
7750

7751
        chanPolicy := routing.ChannelPolicy{
3✔
7752
                FeeSchema:     feeSchema,
3✔
7753
                TimeLockDelta: req.TimeLockDelta,
3✔
7754
                MaxHTLC:       maxHtlc,
3✔
7755
                MinHTLC:       minHtlc,
3✔
7756
        }
3✔
7757

3✔
7758
        rpcsLog.Debugf("[updatechanpolicy] updating channel policy "+
3✔
7759
                "base_fee=%v, rate_fixed=%v, time_lock_delta: %v, "+
3✔
7760
                "min_htlc=%v, max_htlc=%v, targets=%v",
3✔
7761
                req.BaseFeeMsat, feeRateFixed, req.TimeLockDelta,
3✔
7762
                minHtlc, maxHtlc,
3✔
7763
                spew.Sdump(targetChans))
3✔
7764

3✔
7765
        // With the scope resolved, we'll now send this to the local channel
3✔
7766
        // manager so it can propagate the new policy for our target channel(s).
3✔
7767
        failedUpdates, err := r.server.localChanMgr.UpdatePolicy(chanPolicy,
3✔
7768
                req.CreateMissingEdge, targetChans...)
3✔
7769
        if err != nil {
3✔
7770
                return nil, err
×
7771
        }
×
7772

7773
        return &lnrpc.PolicyUpdateResponse{
3✔
7774
                FailedUpdates: failedUpdates,
3✔
7775
        }, nil
3✔
7776
}
7777

7778
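// Editorial illustration (not part of rpcserver.go): a hedged sketch of how a
// gRPC client might drive the UpdateChannelPolicy handler above. The field
// names are taken from the request fields the handler reads; the client value
// is assumed to be an lnrpc.LightningClient obtained through the usual gRPC
// connection boilerplate.
func updateGlobalPolicy(ctx context.Context,
        client lnrpc.LightningClient) error {

        resp, err := client.UpdateChannelPolicy(ctx, &lnrpc.PolicyUpdateRequest{
                // Apply the new policy to all channels rather than a single
                // channel point.
                Scope:         &lnrpc.PolicyUpdateRequest_Global{Global: true},
                BaseFeeMsat:   1_000,       // 1 sat base fee.
                FeeRatePpm:    500,         // 500 ppm, i.e. 0.05%.
                TimeLockDelta: 80,          // Must satisfy minTimeLockDelta.
                MaxHtlcMsat:   100_000_000, // Cap outgoing HTLCs at 100k sat.
        })
        if err != nil {
                return err
        }

        // Channels whose edges could not be updated are reported back rather
        // than failing the whole call.
        rpcsLog.Infof("failed policy updates: %v", resp.FailedUpdates)

        return nil
}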
// ForwardingHistory allows the caller to query the htlcswitch for a record of
7779
// all HTLCs forwarded within the target time range, and an integer offset within
7780
// that time range. If no time-range is specified, then the first chunk of the
7781
// past 24 hrs of forwarding history is returned.
7782

7783
// A list of forwarding events is returned. The size of each forwarding event
7784
// is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB.
7785
// In order to safely stay under this max limit, we'll return 50k events per
7786
// response.  Each response has the index offset of the last entry. The index
7787
// offset can be provided to the request to allow the caller to skip a series
7788
// of records.
7789
func (r *rpcServer) ForwardingHistory(ctx context.Context,
7790
        req *lnrpc.ForwardingHistoryRequest) (*lnrpc.ForwardingHistoryResponse,
7791
        error) {
3✔
7792

3✔
7793
        // Before we perform the queries below, we'll instruct the switch to
3✔
7794
        // flush any pending events to disk. This ensures we get a complete
3✔
7795
        // snapshot at this particular time.
3✔
7796
        if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil {
3✔
7797
                return nil, fmt.Errorf("unable to flush forwarding "+
×
7798
                        "events: %v", err)
×
7799
        }
×
7800

7801
        var (
3✔
7802
                startTime, endTime time.Time
3✔
7803

3✔
7804
                numEvents uint32
3✔
7805
        )
3✔
7806

3✔
7807
        // startTime defaults to the Unix epoch (0 unixtime, or
3✔
7808
        // midnight 01-01-1970).
3✔
7809
        startTime = time.Unix(int64(req.StartTime), 0)
3✔
7810

3✔
7811
        // If the end time wasn't specified, assume a default end time of now.
3✔
7812
        if req.EndTime == 0 {
6✔
7813
                now := time.Now()
3✔
7814
                endTime = now
3✔
7815
        } else {
3✔
7816
                endTime = time.Unix(int64(req.EndTime), 0)
×
7817
        }
×
7818

7819
        // If the number of events wasn't specified, then we'll default to
7820
        // returning the last 100 events.
7821
        numEvents = req.NumMaxEvents
3✔
7822
        if numEvents == 0 {
6✔
7823
                numEvents = 100
3✔
7824
        }
3✔
7825

7826
        // Next, we'll map the proto request into a format that is understood by
7827
        // the forwarding log.
7828
        eventQuery := channeldb.ForwardingEventQuery{
3✔
7829
                StartTime:    startTime,
3✔
7830
                EndTime:      endTime,
3✔
7831
                IndexOffset:  req.IndexOffset,
3✔
7832
                NumMaxEvents: numEvents,
3✔
7833
        }
3✔
7834
        timeSlice, err := r.server.miscDB.ForwardingLog().Query(eventQuery)
3✔
7835
        if err != nil {
3✔
7836
                return nil, fmt.Errorf("unable to query forwarding log: %w",
×
7837
                        err)
×
7838
        }
×
7839

7840
        // chanToPeerAlias caches previously looked up channel information.
7841
        chanToPeerAlias := make(map[lnwire.ShortChannelID]string)
3✔
7842

3✔
7843
        // Helper function to extract a peer's node alias given its SCID.
3✔
7844
        getRemoteAlias := func(chanID lnwire.ShortChannelID) (string, error) {
6✔
7845
                // If we'd previously seen this chanID then return the cached
3✔
7846
                // peer alias.
3✔
7847
                if peerAlias, ok := chanToPeerAlias[chanID]; ok {
6✔
7848
                        return peerAlias, nil
3✔
7849
                }
3✔
7850

7851
                // Else call the server to look up the peer alias.
7852
                edge, err := r.GetChanInfo(ctx, &lnrpc.ChanInfoRequest{
3✔
7853
                        ChanId: chanID.ToUint64(),
3✔
7854
                })
3✔
7855
                if err != nil {
3✔
7856
                        return "", err
×
7857
                }
×
7858

7859
                remotePub := edge.Node1Pub
3✔
7860
                if r.selfNode.String() == edge.Node1Pub {
4✔
7861
                        remotePub = edge.Node2Pub
1✔
7862
                }
1✔
7863

7864
                vertex, err := route.NewVertexFromStr(remotePub)
3✔
7865
                if err != nil {
3✔
7866
                        return "", err
×
7867
                }
×
7868

7869
                peer, err := r.server.graphDB.FetchLightningNode(vertex)
3✔
7870
                if err != nil {
3✔
7871
                        return "", err
×
7872
                }
×
7873

7874
                // Cache the peer alias.
7875
                chanToPeerAlias[chanID] = peer.Alias
3✔
7876

3✔
7877
                return peer.Alias, nil
3✔
7878
        }
7879

7880
        // TODO(roasbeef): add settlement latency?
7881
        //  * use FPE on all records?
7882

7883
        // With the events retrieved, we'll now map them into the proper proto
7884
        // response.
7885
        //
7886
        // TODO(roasbeef): show in ns for the outside?
7887
        fwdingEvents := make(
3✔
7888
                []*lnrpc.ForwardingEvent, len(timeSlice.ForwardingEvents),
3✔
7889
        )
3✔
7890
        resp := &lnrpc.ForwardingHistoryResponse{
3✔
7891
                ForwardingEvents: fwdingEvents,
3✔
7892
                LastOffsetIndex:  timeSlice.LastIndexOffset,
3✔
7893
        }
3✔
7894
        for i, event := range timeSlice.ForwardingEvents {
6✔
7895
                amtInMsat := event.AmtIn
3✔
7896
                amtOutMsat := event.AmtOut
3✔
7897
                feeMsat := event.AmtIn - event.AmtOut
3✔
7898

3✔
7899
                resp.ForwardingEvents[i] = &lnrpc.ForwardingEvent{
3✔
7900
                        Timestamp:   uint64(event.Timestamp.Unix()),
3✔
7901
                        TimestampNs: uint64(event.Timestamp.UnixNano()),
3✔
7902
                        ChanIdIn:    event.IncomingChanID.ToUint64(),
3✔
7903
                        ChanIdOut:   event.OutgoingChanID.ToUint64(),
3✔
7904
                        AmtIn:       uint64(amtInMsat.ToSatoshis()),
3✔
7905
                        AmtOut:      uint64(amtOutMsat.ToSatoshis()),
3✔
7906
                        Fee:         uint64(feeMsat.ToSatoshis()),
3✔
7907
                        FeeMsat:     uint64(feeMsat),
3✔
7908
                        AmtInMsat:   uint64(amtInMsat),
3✔
7909
                        AmtOutMsat:  uint64(amtOutMsat),
3✔
7910
                }
3✔
7911

3✔
7912
                if req.PeerAliasLookup {
6✔
7913
                        aliasIn, err := getRemoteAlias(event.IncomingChanID)
3✔
7914
                        if err != nil {
3✔
7915
                                aliasIn = fmt.Sprintf("unable to lookup peer "+
×
7916
                                        "alias: %v", err)
×
7917
                        }
×
7918
                        aliasOut, err := getRemoteAlias(event.OutgoingChanID)
3✔
7919
                        if err != nil {
3✔
7920
                                aliasOut = fmt.Sprintf("unable to lookup peer "+
×
7921
                                        "alias: %v", err)
×
7922
                        }
×
7923
                        resp.ForwardingEvents[i].PeerAliasIn = aliasIn
3✔
7924
                        resp.ForwardingEvents[i].PeerAliasOut = aliasOut
3✔
7925
                }
7926
        }
7927

7928
        return resp, nil
3✔
7929
}
7930

7931
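// Editorial illustration (not part of rpcserver.go): a sketch of the
// pagination pattern described in the ForwardingHistory comment above,
// feeding each response's LastOffsetIndex back in as the next request's
// IndexOffset until no further events are returned. The client value is
// assumed to be an lnrpc.LightningClient.
func allForwardingEvents(ctx context.Context,
        client lnrpc.LightningClient) ([]*lnrpc.ForwardingEvent, error) {

        var (
                events []*lnrpc.ForwardingEvent
                offset uint32
        )
        for {
                resp, err := client.ForwardingHistory(
                        ctx, &lnrpc.ForwardingHistoryRequest{
                                IndexOffset:  offset,
                                NumMaxEvents: 1000,
                        },
                )
                if err != nil {
                        return nil, err
                }

                // An empty page means we've read every event in range.
                if len(resp.ForwardingEvents) == 0 {
                        return events, nil
                }

                events = append(events, resp.ForwardingEvents...)
                offset = resp.LastOffsetIndex
        }
}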
// ExportChannelBackup attempts to return an encrypted static channel backup
7932
// for the target channel identified by its channel point. The backup is
7933
// encrypted with a key generated from the aezeed seed of the user. The
7934
// returned backup can either be restored using the RestoreChannelBackup method
7935
// once lnd is running, or via the InitWallet and UnlockWallet methods from the
7936
// WalletUnlocker service.
7937
func (r *rpcServer) ExportChannelBackup(ctx context.Context,
7938
        in *lnrpc.ExportChannelBackupRequest) (*lnrpc.ChannelBackup, error) {
3✔
7939

3✔
7940
        // First, we'll convert the lnrpc channel point into a wire.OutPoint
3✔
7941
        // that we can manipulate.
3✔
7942
        txid, err := lnrpc.GetChanPointFundingTxid(in.ChanPoint)
3✔
7943
        if err != nil {
3✔
7944
                return nil, err
×
7945
        }
×
7946
        chanPoint := wire.OutPoint{
3✔
7947
                Hash:  *txid,
3✔
7948
                Index: in.ChanPoint.OutputIndex,
3✔
7949
        }
3✔
7950

3✔
7951
        // Next, we'll attempt to fetch a channel backup for this channel from
3✔
7952
        // the database. If this channel has been closed, or the outpoint is
3✔
7953
        // unknown, then we'll return an error.
3✔
7954
        unpackedBackup, err := chanbackup.FetchBackupForChan(
3✔
7955
                chanPoint, r.server.chanStateDB, r.server.addrSource,
3✔
7956
        )
3✔
7957
        if err != nil {
3✔
7958
                return nil, err
×
7959
        }
×
7960

7961
        // At this point, we have an unpacked backup (plaintext) so we'll now
7962
        // attempt to serialize and encrypt it in order to create a packed
7963
        // backup.
7964
        packedBackups, err := chanbackup.PackStaticChanBackups(
3✔
7965
                []chanbackup.Single{*unpackedBackup},
3✔
7966
                r.server.cc.KeyRing,
3✔
7967
        )
3✔
7968
        if err != nil {
3✔
7969
                return nil, fmt.Errorf("packing of back ups failed: %w", err)
×
7970
        }
×
7971

7972
        // Before we proceed, we'll ensure that we received a backup for this
7973
        // channel, otherwise, we'll bail out.
7974
        packedBackup, ok := packedBackups[chanPoint]
3✔
7975
        if !ok {
3✔
7976
                return nil, fmt.Errorf("expected single backup for "+
×
7977
                        "ChannelPoint(%v), got %v", chanPoint,
×
7978
                        len(packedBackup))
×
7979
        }
×
7980

7981
        return &lnrpc.ChannelBackup{
3✔
7982
                ChanPoint:  in.ChanPoint,
3✔
7983
                ChanBackup: packedBackup,
3✔
7984
        }, nil
3✔
7985
}
7986

7987
// VerifyChanBackup allows a caller to verify the integrity of a channel backup
7988
// snapshot. This method will accept both either a packed Single or a packed
7989
// Multi. Specifying both will result in an error.
7990
func (r *rpcServer) VerifyChanBackup(ctx context.Context,
7991
        in *lnrpc.ChanBackupSnapshot) (*lnrpc.VerifyChanBackupResponse, error) {
3✔
7992

3✔
7993
        var (
3✔
7994
                channels []chanbackup.Single
3✔
7995
                err      error
3✔
7996
        )
3✔
7997
        switch {
3✔
7998
        // If neither a Single or Multi has been specified, then we have nothing
7999
        // to verify.
8000
        case in.GetSingleChanBackups() == nil && in.GetMultiChanBackup() == nil:
×
8001
                return nil, errors.New("either a Single or Multi channel " +
×
8002
                        "backup must be specified")
×
8003

8004
        // Either a Single or a Multi must be specified, but not both.
8005
        case in.GetSingleChanBackups() != nil && in.GetMultiChanBackup() != nil:
×
8006
                return nil, errors.New("either a Single or Multi channel " +
×
8007
                        "backup must be specified, but not both")
×
8008

8009
        // If a Single is specified then we'll only accept one of them to allow
8010
        // the caller to map the valid/invalid state for each individual Single.
8011
        case in.GetSingleChanBackups() != nil:
×
8012
                chanBackupsProtos := in.GetSingleChanBackups().ChanBackups
×
8013
                if len(chanBackupsProtos) != 1 {
×
8014
                        return nil, errors.New("only one Single is accepted " +
×
8015
                                "at a time")
×
8016
                }
×
8017

8018
                // First, we'll convert the raw byte slice into a type we can
8019
                // work with a bit better.
8020
                chanBackup := chanbackup.PackedSingles(
×
8021
                        [][]byte{chanBackupsProtos[0].ChanBackup},
×
8022
                )
×
8023

×
8024
                // With our PackedSingles created, we'll attempt to unpack the
×
8025
                // backup. If this fails, then we know the backup is invalid for
×
8026
                // some reason.
×
8027
                channels, err = chanBackup.Unpack(r.server.cc.KeyRing)
×
8028
                if err != nil {
×
8029
                        return nil, fmt.Errorf("invalid single channel "+
×
8030
                                "backup: %v", err)
×
8031
                }
×
8032

8033
        case in.GetMultiChanBackup() != nil:
3✔
8034
                // We'll convert the raw byte slice into a PackedMulti that we
3✔
8035
                // can easily work with.
3✔
8036
                packedMultiBackup := in.GetMultiChanBackup().MultiChanBackup
3✔
8037
                packedMulti := chanbackup.PackedMulti(packedMultiBackup)
3✔
8038

3✔
8039
                // We'll now attempt to unpack the Multi. If this fails, then we
3✔
8040
                // know it's invalid.
3✔
8041
                multi, err := packedMulti.Unpack(r.server.cc.KeyRing)
3✔
8042
                if err != nil {
3✔
8043
                        return nil, fmt.Errorf("invalid multi channel backup: "+
×
8044
                                "%v", err)
×
8045
                }
×
8046

8047
                channels = multi.StaticBackups
3✔
8048
        }
8049

8050
        return &lnrpc.VerifyChanBackupResponse{
3✔
8051
                ChanPoints: fn.Map(channels, func(c chanbackup.Single) string {
6✔
8052
                        return c.FundingOutpoint.String()
3✔
8053
                }),
3✔
8054
        }, nil
8055
}
8056

8057
// createBackupSnapshot converts the passed Single backup into a snapshot which
8058
// contains individual packed single backups, as well as a single packed multi
8059
// backup.
8060
func (r *rpcServer) createBackupSnapshot(backups []chanbackup.Single) (
8061
        *lnrpc.ChanBackupSnapshot, error) {
3✔
8062

3✔
8063
        // Once we have the set of back ups, we'll attempt to pack them all
3✔
8064
        // into a series of single channel backups.
3✔
8065
        singleChanPackedBackups, err := chanbackup.PackStaticChanBackups(
3✔
8066
                backups, r.server.cc.KeyRing,
3✔
8067
        )
3✔
8068
        if err != nil {
3✔
8069
                return nil, fmt.Errorf("unable to pack set of chan "+
×
8070
                        "backups: %v", err)
×
8071
        }
×
8072

8073
        // Now that we have our set of single packed backups, we'll morph that
8074
        // into a form that the proto response requires.
8075
        numBackups := len(singleChanPackedBackups)
3✔
8076
        singleBackupResp := &lnrpc.ChannelBackups{
3✔
8077
                ChanBackups: make([]*lnrpc.ChannelBackup, 0, numBackups),
3✔
8078
        }
3✔
8079
        for chanPoint, singlePackedBackup := range singleChanPackedBackups {
6✔
8080
                txid := chanPoint.Hash
3✔
8081
                rpcChanPoint := &lnrpc.ChannelPoint{
3✔
8082
                        FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
3✔
8083
                                FundingTxidBytes: txid[:],
3✔
8084
                        },
3✔
8085
                        OutputIndex: chanPoint.Index,
3✔
8086
                }
3✔
8087

3✔
8088
                singleBackupResp.ChanBackups = append(
3✔
8089
                        singleBackupResp.ChanBackups,
3✔
8090
                        &lnrpc.ChannelBackup{
3✔
8091
                                ChanPoint:  rpcChanPoint,
3✔
8092
                                ChanBackup: singlePackedBackup,
3✔
8093
                        },
3✔
8094
                )
3✔
8095
        }
3✔
8096

8097
        // In addition to the set of single chan backups, we'll also create a
8098
        // single multi-channel backup which can be serialized into a single
8099
        // file for safe storage.
8100
        var b bytes.Buffer
3✔
8101
        unpackedMultiBackup := chanbackup.Multi{
3✔
8102
                StaticBackups: backups,
3✔
8103
        }
3✔
8104
        err = unpackedMultiBackup.PackToWriter(&b, r.server.cc.KeyRing)
3✔
8105
        if err != nil {
3✔
8106
                return nil, fmt.Errorf("unable to multi-pack backups: %w", err)
×
8107
        }
×
8108

8109
        multiBackupResp := &lnrpc.MultiChanBackup{
3✔
8110
                MultiChanBackup: b.Bytes(),
3✔
8111
        }
3✔
8112
        for _, singleBackup := range singleBackupResp.ChanBackups {
6✔
8113
                multiBackupResp.ChanPoints = append(
3✔
8114
                        multiBackupResp.ChanPoints, singleBackup.ChanPoint,
3✔
8115
                )
3✔
8116
        }
3✔
8117

8118
        return &lnrpc.ChanBackupSnapshot{
3✔
8119
                SingleChanBackups: singleBackupResp,
3✔
8120
                MultiChanBackup:   multiBackupResp,
3✔
8121
        }, nil
3✔
8122
}
8123

8124
// ExportAllChannelBackups returns static channel backups for all existing
8125
// channels known to lnd. A set of regular singular static channel backups for
8126
// each channel are returned. Additionally, a multi-channel backup is returned
8127
// as well, which contains a single encrypted blob containing the backups of
8128
// each channel.
8129
func (r *rpcServer) ExportAllChannelBackups(ctx context.Context,
8130
        in *lnrpc.ChanBackupExportRequest) (*lnrpc.ChanBackupSnapshot, error) {
3✔
8131

3✔
8132
        // First, we'll attempt to read back ups for ALL currently opened
3✔
8133
        // channels from disk.
3✔
8134
        allUnpackedBackups, err := chanbackup.FetchStaticChanBackups(
3✔
8135
                r.server.chanStateDB, r.server.addrSource,
3✔
8136
        )
3✔
8137
        if err != nil {
3✔
8138
                return nil, fmt.Errorf("unable to fetch all static chan "+
×
8139
                        "backups: %v", err)
×
8140
        }
×
8141

8142
        // With the backups assembled, we'll create a full snapshot.
8143
        return r.createBackupSnapshot(allUnpackedBackups)
3✔
8144
}

// RestoreChannelBackups accepts a set of singular channel backups, or a single
// encrypted multi-chan backup and attempts to recover any funds remaining
// within the channel. If we're able to unpack the backup, then the new channel
// will be shown under listchannels, as well as pending channels.
func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
        in *lnrpc.RestoreChanBackupRequest) (*lnrpc.RestoreBackupResponse, error) {

        // The server hasn't yet started, so it won't be able to service any of
        // our requests, so we'll bail early here.
        if !r.server.Started() {
                return nil, ErrServerNotActive
        }

        // First, we'll make our implementation of the
        // chanbackup.ChannelRestorer interface which we'll use to properly
        // restore either a set of chanbackup.Single or chanbackup.Multi
        // backups.
        chanRestorer := &chanDBRestorer{
                db:         r.server.chanStateDB,
                secretKeys: r.server.cc.KeyRing,
                chainArb:   r.server.chainArb,
        }

        // We'll accept either a list of Single backups, or a single Multi
        // backup which contains several single backups.
        var (
                numRestored int
                err         error
        )
        switch {
        case in.GetChanBackups() != nil:
                chanBackupsProtos := in.GetChanBackups()

                // Now that we know what type of backup we're working with,
                // we'll parse them all out into a more suitable format.
                packedBackups := make([][]byte, 0, len(chanBackupsProtos.ChanBackups))
                for _, chanBackup := range chanBackupsProtos.ChanBackups {
                        packedBackups = append(
                                packedBackups, chanBackup.ChanBackup,
                        )
                }

                // With our backups obtained, we'll now restore them, which
                // will write the new backups to disk, and then attempt to
                // connect out to any peers that we know of which were our
                // prior channel peers.
                numRestored, err = chanbackup.UnpackAndRecoverSingles(
                        chanbackup.PackedSingles(packedBackups),
                        r.server.cc.KeyRing, chanRestorer, r.server,
                )
                if err != nil {
                        return nil, fmt.Errorf("unable to unpack single "+
                                "backups: %v", err)
                }

        case in.GetMultiChanBackup() != nil:
                packedMultiBackup := in.GetMultiChanBackup()

                // With our backups obtained, we'll now restore them, which
                // will write the new backups to disk, and then attempt to
                // connect out to any peers that we know of which were our
                // prior channel peers.
                packedMulti := chanbackup.PackedMulti(packedMultiBackup)
                numRestored, err = chanbackup.UnpackAndRecoverMulti(
                        packedMulti, r.server.cc.KeyRing, chanRestorer,
                        r.server,
                )
                if err != nil {
                        return nil, fmt.Errorf("unable to unpack chan "+
                                "backup: %v", err)
                }
        }

        return &lnrpc.RestoreBackupResponse{
                NumRestored: uint32(numRestored),
        }, nil
}
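
// NOTE: Illustrative sketch only, not part of lnd. It restores channels from
// a previously exported multi-channel backup file using the RPC above. The
// file path is caller-supplied and the oneof wrapper type follows the
// standard protoc-gen-go naming for lnrpc.RestoreChanBackupRequest.
func exampleRestoreFromMultiBackup(ctx context.Context,
        client lnrpc.LightningClient, backupPath string) (uint32, error) {

        blob, err := os.ReadFile(backupPath)
        if err != nil {
                return 0, err
        }

        resp, err := client.RestoreChannelBackups(
                ctx, &lnrpc.RestoreChanBackupRequest{
                        Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
                                MultiChanBackup: blob,
                        },
                },
        )
        if err != nil {
                return 0, err
        }

        // NumRestored reports how many channels were recovered from the blob.
        return resp.NumRestored, nil
}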

// SubscribeChannelBackups allows a client to subscribe to the most up to
// date information concerning the state of all channel backups. Each time a
// new channel is added, we return the new set of channels, along with a
// multi-chan backup containing the backup info for all channels. Each time a
// channel is closed, we send a new update, which contains no new channel
// backups, but the updated set of encrypted multi-chan backups with the
// closed channel(s) removed.
func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription,
        updateStream lnrpc.Lightning_SubscribeChannelBackupsServer) error {

        // First, we'll subscribe to the primary channel notifier so we can
        // obtain events for new pending/opened/closed channels.
        chanSubscription, err := r.server.channelNotifier.SubscribeChannelEvents()
        if err != nil {
                return err
        }

        defer chanSubscription.Cancel()
        for {
                select {
                // A new event has been sent by the channel notifier, so we'll
                // assemble, then sling out a new event to the client.
                case e := <-chanSubscription.Updates():
                        // TODO(roasbeef): batch dispatch ntnfs

                        switch e.(type) {

                        // We only care about new/closed channels, so we'll
                        // skip any events for active/inactive channels.
                        // To make the subscription behave the same way as the
                        // synchronous call and the file based backup, we also
                        // include pending channels in the update.
                        case channelnotifier.ActiveChannelEvent:
                                continue
                        case channelnotifier.InactiveChannelEvent:
                                continue
                        case channelnotifier.ActiveLinkEvent:
                                continue
                        case channelnotifier.InactiveLinkEvent:
                                continue
                        }

                        // Now that we know the channel state has changed,
                        // we'll obtain the current set of single channel
                        // backups from disk.
                        chanBackups, err := chanbackup.FetchStaticChanBackups(
                                r.server.chanStateDB, r.server.addrSource,
                        )
                        if err != nil {
                                return fmt.Errorf("unable to fetch all "+
                                        "static chan backups: %v", err)
                        }

                        // With our backups obtained, we'll pack them into a
                        // snapshot and send them back to the client.
                        backupSnapshot, err := r.createBackupSnapshot(
                                chanBackups,
                        )
                        if err != nil {
                                return err
                        }
                        err = updateStream.Send(backupSnapshot)
                        if err != nil {
                                return err
                        }

                // The response stream's context has been closed for whatever
                // reason. If the context was closed because a deadline was
                // exceeded, we return an error.
                case <-updateStream.Context().Done():
                        if errors.Is(updateStream.Context().Err(), context.Canceled) {
                                return nil
                        }
                        return updateStream.Context().Err()

                case <-r.quit:
                        return nil
                }
        }
}
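
// NOTE: Illustrative sketch only, not part of lnd. It consumes the channel
// backup subscription stream served by the method above and hands every new
// snapshot's multi-channel backup blob to a caller-provided persist callback
// (a hypothetical helper, e.g. one that uploads the blob off-site).
func exampleWatchBackups(ctx context.Context, client lnrpc.LightningClient,
        persist func([]byte) error) error {

        stream, err := client.SubscribeChannelBackups(
                ctx, &lnrpc.ChannelBackupSubscription{},
        )
        if err != nil {
                return err
        }

        for {
                // Recv blocks until a new snapshot arrives or the stream ends
                // (io.EOF or a context error).
                snapshot, err := stream.Recv()
                if err != nil {
                        return err
                }

                blob := snapshot.MultiChanBackup.MultiChanBackup
                if err := persist(blob); err != nil {
                        return err
                }
        }
}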

// ChannelAcceptor dispatches a bi-directional streaming RPC in which
// OpenChannel requests are sent to the client and the client responds with
// a boolean that tells LND whether or not to accept the channel. This allows
// node operators to specify their own criteria for accepting inbound channels
// through a single persistent connection.
func (r *rpcServer) ChannelAcceptor(stream lnrpc.Lightning_ChannelAcceptorServer) error {
        chainedAcceptor := r.chanPredicate

        // Create a new RPCAcceptor which will send requests into the
        // newRequests channel when it receives them.
        rpcAcceptor := chanacceptor.NewRPCAcceptor(
                stream.Recv, stream.Send, r.cfg.AcceptorTimeout,
                r.cfg.ActiveNetParams.Params, r.quit,
        )

        // Add the RPCAcceptor to the ChainedAcceptor and defer its removal.
        id := chainedAcceptor.AddAcceptor(rpcAcceptor)
        defer chainedAcceptor.RemoveAcceptor(id)

        // Run the rpc acceptor, which will accept requests for channel
        // acceptance decisions from our chained acceptor, send them to the
        // channel acceptor and listen for and report responses. This function
        // blocks, and will exit if the rpcserver receives the instruction to
        // shut down, or the client cancels.
        return rpcAcceptor.Run()
}
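
// NOTE: Illustrative sketch only, not part of lnd. It runs a minimal
// client-side channel acceptor against the streaming RPC above, rejecting any
// inbound channel below a caller-chosen minimum funding amount and accepting
// everything else. A real acceptor would typically inspect more fields of the
// ChannelAcceptRequest (commitment type, push amount, etc.).
func exampleRunChannelAcceptor(ctx context.Context,
        client lnrpc.LightningClient, minFundingAmt uint64) error {

        acceptorStream, err := client.ChannelAcceptor(ctx)
        if err != nil {
                return err
        }

        for {
                req, err := acceptorStream.Recv()
                if err != nil {
                        return err
                }

                resp := &lnrpc.ChannelAcceptResponse{
                        PendingChanId: req.PendingChanId,
                        Accept:        req.FundingAmt >= minFundingAmt,
                }
                if !resp.Accept {
                        resp.Error = "channel below minimum size"
                }

                if err := acceptorStream.Send(resp); err != nil {
                        return err
                }
        }
}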

// BakeMacaroon allows the creation of a new macaroon with custom read and write
// permissions. No first-party caveats are added since this can be done offline.
// If the --allow-external-permissions flag is set, the RPC will allow
// external permissions that LND is not aware of.
func (r *rpcServer) BakeMacaroon(ctx context.Context,
        req *lnrpc.BakeMacaroonRequest) (*lnrpc.BakeMacaroonResponse, error) {

        // If the --no-macaroons flag is used to start lnd, the macaroon service
        // is not initialized. Therefore we can't bake new macaroons.
        if r.macService == nil {
                return nil, errMacaroonDisabled
        }

        helpMsg := fmt.Sprintf("supported actions are %v, supported entities "+
                "are %v", validActions, validEntities)

        // Don't allow empty permission list as it doesn't make sense to have
        // a macaroon that is not allowed to access any RPC.
        if len(req.Permissions) == 0 {
                return nil, fmt.Errorf("permission list cannot be empty. "+
                        "specify at least one action/entity pair. %s", helpMsg)
        }

        // Validate and map permission struct used by gRPC to the one used by
        // the bakery. If the --allow-external-permissions flag is set, we
        // will not validate, but map.
        requestedPermissions := make([]bakery.Op, len(req.Permissions))
        for idx, op := range req.Permissions {
                if req.AllowExternalPermissions {
                        requestedPermissions[idx] = bakery.Op{
                                Entity: op.Entity,
                                Action: op.Action,
                        }
                        continue
                }

                if !stringInSlice(op.Entity, validEntities) {
                        return nil, fmt.Errorf("invalid permission entity. %s",
                                helpMsg)
                }

                // Either we have the special entity "uri" which specifies a
                // full gRPC URI or we have one of the pre-defined actions.
                if op.Entity == macaroons.PermissionEntityCustomURI {
                        allPermissions := r.interceptorChain.Permissions()
                        _, ok := allPermissions[op.Action]
                        if !ok {
                                return nil, fmt.Errorf("invalid permission " +
                                        "action, must be an existing URI in " +
                                        "the format /package.Service/" +
                                        "MethodName")
                        }
                } else if !stringInSlice(op.Action, validActions) {
                        return nil, fmt.Errorf("invalid permission action. %s",
                                helpMsg)
                }

                requestedPermissions[idx] = bakery.Op{
                        Entity: op.Entity,
                        Action: op.Action,
                }
        }

        // Convert root key id from uint64 to bytes. Because the
        // DefaultRootKeyID is a digit 0 expressed in a byte slice of a string
        // "0", we will keep the IDs in the same format - all must be numeric,
        // and must be a byte slice of string value of the digit, e.g.,
        // uint64(123) to string(123).
        rootKeyID := []byte(strconv.FormatUint(req.RootKeyId, 10))

        // Bake new macaroon with the given permissions and send it binary
        // serialized and hex encoded to the client.
        newMac, err := r.macService.NewMacaroon(
                ctx, rootKeyID, requestedPermissions...,
        )
        if err != nil {
                return nil, err
        }
        newMacBytes, err := newMac.M().MarshalBinary()
        if err != nil {
                return nil, err
        }
        resp := &lnrpc.BakeMacaroonResponse{}
        resp.Macaroon = hex.EncodeToString(newMacBytes)

        return resp, nil
}
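
// NOTE: Illustrative sketch only, not part of lnd. It bakes a macaroon that
// is limited to read-only access on the invoices entity via the RPC above and
// returns it hex encoded, which is the format the response already uses. The
// "invoices"/"read" pair is one of the built-in entity/action combinations
// validated by BakeMacaroon.
func exampleBakeInvoiceReadMacaroon(ctx context.Context,
        client lnrpc.LightningClient) (string, error) {

        resp, err := client.BakeMacaroon(ctx, &lnrpc.BakeMacaroonRequest{
                Permissions: []*lnrpc.MacaroonPermission{
                        {Entity: "invoices", Action: "read"},
                },
        })
        if err != nil {
                return "", err
        }

        // The macaroon is returned hex encoded; decode it before writing a
        // binary *.macaroon file.
        return resp.Macaroon, nil
}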

// ListMacaroonIDs returns a list of macaroon root key IDs in use.
func (r *rpcServer) ListMacaroonIDs(ctx context.Context,
        req *lnrpc.ListMacaroonIDsRequest) (
        *lnrpc.ListMacaroonIDsResponse, error) {

        // If the --no-macaroons flag is used to start lnd, the macaroon service
        // is not initialized. Therefore we can't show any IDs.
        if r.macService == nil {
                return nil, errMacaroonDisabled
        }

        rootKeyIDByteSlice, err := r.macService.ListMacaroonIDs(ctx)
        if err != nil {
                return nil, err
        }

        var rootKeyIDs []uint64
        for _, value := range rootKeyIDByteSlice {
                // Convert bytes into uint64.
                id, err := strconv.ParseUint(string(value), 10, 64)
                if err != nil {
                        return nil, err
                }

                rootKeyIDs = append(rootKeyIDs, id)
        }

        return &lnrpc.ListMacaroonIDsResponse{RootKeyIds: rootKeyIDs}, nil
}
}
8449

8450
// DeleteMacaroonID removes a specific macaroon ID.
8451
func (r *rpcServer) DeleteMacaroonID(ctx context.Context,
8452
        req *lnrpc.DeleteMacaroonIDRequest) (
8453
        *lnrpc.DeleteMacaroonIDResponse, error) {
3✔
8454

3✔
8455
        // If the --no-macaroons flag is used to start lnd, the macaroon service
3✔
8456
        // is not initialized. Therefore we can't delete any IDs.
3✔
8457
        if r.macService == nil {
3✔
8458
                return nil, errMacaroonDisabled
×
8459
        }
×
8460

8461
        // Convert root key id from uint64 to bytes. Because the
8462
        // DefaultRootKeyID is a digit 0 expressed in a byte slice of a string
8463
        // "0", we will keep the IDs in the same format - all must be digit, and
8464
        // must be a byte slice of string value of the digit.
8465
        rootKeyID := []byte(strconv.FormatUint(req.RootKeyId, 10))
3✔
8466
        deletedIDBytes, err := r.macService.DeleteMacaroonID(ctx, rootKeyID)
3✔
8467
        if err != nil {
6✔
8468
                return nil, err
3✔
8469
        }
3✔
8470

8471
        return &lnrpc.DeleteMacaroonIDResponse{
3✔
8472
                // If the root key ID doesn't exist, it won't be deleted. We
3✔
8473
                // will return a response with deleted = false, otherwise true.
3✔
8474
                Deleted: deletedIDBytes != nil,
3✔
8475
        }, nil
3✔
8476
}

// ListPermissions lists all RPC method URIs and their required macaroon
// permissions to access them.
func (r *rpcServer) ListPermissions(_ context.Context,
        _ *lnrpc.ListPermissionsRequest) (*lnrpc.ListPermissionsResponse,
        error) {

        permissionMap := make(map[string]*lnrpc.MacaroonPermissionList)
        for uri, perms := range r.interceptorChain.Permissions() {
                rpcPerms := make([]*lnrpc.MacaroonPermission, len(perms))
                for idx, perm := range perms {
                        rpcPerms[idx] = &lnrpc.MacaroonPermission{
                                Entity: perm.Entity,
                                Action: perm.Action,
                        }
                }
                permissionMap[uri] = &lnrpc.MacaroonPermissionList{
                        Permissions: rpcPerms,
                }
        }

        return &lnrpc.ListPermissionsResponse{
                MethodPermissions: permissionMap,
        }, nil
}

// CheckMacaroonPermissions checks the caveats and permissions of a macaroon.
func (r *rpcServer) CheckMacaroonPermissions(ctx context.Context,
        req *lnrpc.CheckMacPermRequest) (*lnrpc.CheckMacPermResponse, error) {

        // Turn grpc macaroon permission into bakery.Op for the server to
        // process.
        permissions := make([]bakery.Op, len(req.Permissions))
        for idx, perm := range req.Permissions {
                permissions[idx] = bakery.Op{
                        Entity: perm.Entity,
                        Action: perm.Action,
                }
        }

        err := r.macService.CheckMacAuth(
                ctx, req.Macaroon, permissions, req.FullMethod,
        )
        if err != nil {
                return nil, status.Error(codes.InvalidArgument, err.Error())
        }

        return &lnrpc.CheckMacPermResponse{
                Valid: true,
        }, nil
}

// FundingStateStep is an advanced funding related call that allows the caller
// to either execute some preparatory steps for a funding workflow, or manually
// progress a funding workflow. The primary way a funding flow is identified is
// via its pending channel ID. As an example, this method can be used to
// specify that we're expecting a funding flow for a particular pending channel
// ID, for which we need to use specific parameters. Alternatively, this can
// be used to interactively drive PSBT signing for funding for partially
// complete funding transactions.
func (r *rpcServer) FundingStateStep(ctx context.Context,
        in *lnrpc.FundingTransitionMsg) (*lnrpc.FundingStateStepResp, error) {

        var pendingChanID [32]byte
        switch {
        // If this is a message to register a new shim that is an external
        // channel point, then we'll contact the wallet to register this new
        // shim. A user will use this method to register a new channel funding
        // workflow which has already been partially negotiated outside of the
        // core protocol.
        case in.GetShimRegister() != nil &&
                in.GetShimRegister().GetChanPointShim() != nil:

                rpcShimIntent := in.GetShimRegister().GetChanPointShim()

                // Using the rpc shim as a template, we'll construct a new
                // chanfunding.Assembler that is able to express proper
                // formulation of this expected channel.
                shimAssembler, err := newFundingShimAssembler(
                        rpcShimIntent, false, r.server.cc.KeyRing,
                )
                if err != nil {
                        return nil, err
                }
                req := &chanfunding.Request{
                        RemoteAmt: btcutil.Amount(rpcShimIntent.Amt),
                }
                shimIntent, err := shimAssembler.ProvisionChannel(req)
                if err != nil {
                        return nil, err
                }

                // Once we have the intent, we'll register it with the wallet.
                // Once we receive an incoming funding request that uses this
                // pending channel ID, then this shim will be dispatched in
                // place of our regular funding workflow.
                copy(pendingChanID[:], rpcShimIntent.PendingChanId)
                err = r.server.cc.Wallet.RegisterFundingIntent(
                        pendingChanID, shimIntent,
                )
                if err != nil {
                        return nil, err
                }

        // There is no need to register a PSBT shim before opening the channel,
        // even though our RPC message structure allows for it. Inform the user
        // by returning a proper error instead of just doing nothing.
        case in.GetShimRegister() != nil &&
                in.GetShimRegister().GetPsbtShim() != nil:

                return nil, fmt.Errorf("PSBT shim must only be sent when " +
                        "opening a channel")

        // If this is a transition to cancel an existing shim, then we'll pass
        // this message along to the wallet, informing it that the intent no
        // longer needs to be considered and should be cleaned up.
        case in.GetShimCancel() != nil:
                rpcsLog.Debugf("Canceling funding shim for pending_id=%x",
                        in.GetShimCancel().PendingChanId)

                copy(pendingChanID[:], in.GetShimCancel().PendingChanId)
                err := r.server.cc.Wallet.CancelFundingIntent(pendingChanID)
                if err != nil {
                        return nil, err
                }

        // If this is a transition to verify the PSBT for an existing shim,
        // we'll do so and then store the verified PSBT for later so we can
        // compare it to the final, signed one.
        case in.GetPsbtVerify() != nil:
                rpcsLog.Debugf("Verifying PSBT for pending_id=%x",
                        in.GetPsbtVerify().PendingChanId)

                copy(pendingChanID[:], in.GetPsbtVerify().PendingChanId)
                packet, err := psbt.NewFromRawBytes(
                        bytes.NewReader(in.GetPsbtVerify().FundedPsbt), false,
                )
                if err != nil {
                        return nil, fmt.Errorf("error parsing psbt: %w", err)
                }

                err = r.server.cc.Wallet.PsbtFundingVerify(
                        pendingChanID, packet, in.GetPsbtVerify().SkipFinalize,
                )
                if err != nil {
                        return nil, err
                }

        // If this is a transition to finalize the PSBT funding flow, we compare
        // the final PSBT to the previously verified one and if nothing
        // unexpected was changed, continue the channel opening process.
        case in.GetPsbtFinalize() != nil:
                msg := in.GetPsbtFinalize()
                rpcsLog.Debugf("Finalizing PSBT for pending_id=%x",
                        msg.PendingChanId)

                copy(pendingChanID[:], in.GetPsbtFinalize().PendingChanId)

                var (
                        packet *psbt.Packet
                        rawTx  *wire.MsgTx
                        err    error
                )

                // Either the signed PSBT or the raw transaction needs to be
                // set, but not both at the same time.
                switch {
                case len(msg.SignedPsbt) > 0 && len(msg.FinalRawTx) > 0:
                        return nil, fmt.Errorf("cannot set both signed PSBT " +
                                "and final raw TX at the same time")

                case len(msg.SignedPsbt) > 0:
                        packet, err = psbt.NewFromRawBytes(
                                bytes.NewReader(in.GetPsbtFinalize().SignedPsbt),
                                false,
                        )
                        if err != nil {
                                return nil, fmt.Errorf("error parsing psbt: %w",
                                        err)
                        }

                case len(msg.FinalRawTx) > 0:
                        rawTx = &wire.MsgTx{}
                        err = rawTx.Deserialize(bytes.NewReader(msg.FinalRawTx))
                        if err != nil {
                                return nil, fmt.Errorf("error parsing final "+
                                        "raw TX: %v", err)
                        }

                default:
                        return nil, fmt.Errorf("PSBT or raw transaction to " +
                                "finalize missing")
                }

                err = r.server.cc.Wallet.PsbtFundingFinalize(
                        pendingChanID, packet, rawTx,
                )
                if err != nil {
                        return nil, err
                }
        }

        // TODO(roasbeef): extend PendingChannels to also show shims

        // TODO(roasbeef): return resulting state? also add a method to query
        // current state?
        return &lnrpc.FundingStateStepResp{}, nil
}
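
// NOTE: Illustrative sketch only, not part of lnd. It drives the psbt_verify
// transition of FundingStateStep for a channel that is being opened with a
// PSBT funding shim. The pendingChanID and fundedPsbt values are assumed to
// come from the preceding OpenChannel PSBT flow, and the oneof wrapper type
// follows the standard protoc-gen-go naming for lnrpc.FundingTransitionMsg.
func exampleVerifyFundingPsbt(ctx context.Context,
        client lnrpc.LightningClient, pendingChanID, fundedPsbt []byte) error {

        _, err := client.FundingStateStep(ctx, &lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
                        PsbtVerify: &lnrpc.FundingPsbtVerify{
                                PendingChanId: pendingChanID,
                                FundedPsbt:    fundedPsbt,
                        },
                },
        })

        return err
}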

// RegisterRPCMiddleware adds a new gRPC middleware to the interceptor chain. A
// gRPC middleware is a software component external to lnd that aims to add
// additional business logic to lnd by observing/intercepting/validating
// incoming gRPC client requests and (if needed) replacing/overwriting outgoing
// messages before they're sent to the client. When registering, the middleware
// must identify itself and indicate what custom macaroon caveats it wants to
// be responsible for. Only requests that contain a macaroon with that specific
// custom caveat are then sent to the middleware for inspection. As a security
// measure, _no_ middleware can intercept requests made with _unencumbered_
// macaroons!
func (r *rpcServer) RegisterRPCMiddleware(
        stream lnrpc.Lightning_RegisterRPCMiddlewareServer) error {

        // This is security-critical functionality and needs to be enabled
        // specifically by the user.
        if !r.cfg.RPCMiddleware.Enable {
                return fmt.Errorf("RPC middleware not enabled in config")
        }

        // When registering a middleware, the first message sent from the
        // middleware must be a registration message containing its name and the
        // custom caveat it wants to register for.
        var (
                registerChan     = make(chan *lnrpc.MiddlewareRegistration, 1)
                registerDoneChan = make(chan struct{})
                errChan          = make(chan error, 1)
        )
        ctxc, cancel := context.WithTimeout(
                stream.Context(), r.cfg.RPCMiddleware.InterceptTimeout,
        )
        defer cancel()

        // Read the first message in a goroutine because the Recv method blocks
        // until the message arrives.
        go func() {
                msg, err := stream.Recv()
                if err != nil {
                        errChan <- err

                        return
                }

                registerChan <- msg.GetRegister()
        }()

        // Wait for the initial message to arrive or time out if it takes too
        // long.
        var registerMsg *lnrpc.MiddlewareRegistration
        select {
        case registerMsg = <-registerChan:
                if registerMsg == nil {
                        return fmt.Errorf("invalid initial middleware " +
                                "registration message")
                }

        case err := <-errChan:
                return fmt.Errorf("error receiving initial middleware "+
                        "registration message: %v", err)

        case <-ctxc.Done():
                return ctxc.Err()

        case <-r.quit:
                return ErrServerShuttingDown
        }

        // Make sure the registration is valid.
        const nameMinLength = 5
        if len(registerMsg.MiddlewareName) < nameMinLength {
                return fmt.Errorf("invalid middleware name, use descriptive "+
                        "name of at least %d characters", nameMinLength)
        }

        readOnly := registerMsg.ReadOnlyMode
        caveatName := registerMsg.CustomMacaroonCaveatName
        switch {
        case readOnly && len(caveatName) > 0:
                return fmt.Errorf("cannot set read-only and custom caveat " +
                        "name at the same time")

        case !readOnly && len(caveatName) < nameMinLength:
                return fmt.Errorf("need to set either custom caveat name "+
                        "of at least %d characters or read-only mode",
                        nameMinLength)
        }

        middleware := rpcperms.NewMiddlewareHandler(
                registerMsg.MiddlewareName,
                caveatName, readOnly, stream.Recv, stream.Send,
                r.cfg.RPCMiddleware.InterceptTimeout,
                r.cfg.ActiveNetParams.Params, r.quit,
        )

        // Add the RPC middleware to the interceptor chain and defer its
        // removal.
        if err := r.interceptorChain.RegisterMiddleware(middleware); err != nil {
                return fmt.Errorf("error registering middleware: %w", err)
        }
        defer r.interceptorChain.RemoveMiddleware(registerMsg.MiddlewareName)

        // Send a message to the client to indicate that the registration has
        // successfully completed.
        regCompleteMsg := &lnrpc.RPCMiddlewareRequest{
                InterceptType: &lnrpc.RPCMiddlewareRequest_RegComplete{
                        RegComplete: true,
                },
        }

        // Send the message in a goroutine because the Send method blocks until
        // the message is read by the client.
        go func() {
                err := stream.Send(regCompleteMsg)
                if err != nil {
                        errChan <- err
                        return
                }

                close(registerDoneChan)
        }()

        select {
        case err := <-errChan:
                return fmt.Errorf("error sending middleware registration "+
                        "complete message: %v", err)

        case <-ctxc.Done():
                return ctxc.Err()

        case <-r.quit:
                return ErrServerShuttingDown

        case <-registerDoneChan:
        }

        return middleware.Run()
}
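
// NOTE: Illustrative sketch only, not part of lnd. It registers a read-only
// middleware on the stream above and then passes every interception request
// to a caller-provided handle callback (a hypothetical helper). The oneof
// wrapper and field names follow the standard protoc-gen-go naming for
// lnrpc.RPCMiddlewareResponse. Sending back per-request feedback messages,
// which a full middleware implementation would also do, is omitted here.
func exampleRegisterReadOnlyMiddleware(ctx context.Context,
        client lnrpc.LightningClient,
        handle func(*lnrpc.RPCMiddlewareRequest) error) error {

        stream, err := client.RegisterRPCMiddleware(ctx)
        if err != nil {
                return err
        }

        // The very first message on the stream must be the registration
        // itself, mirroring the check performed by the server above.
        err = stream.Send(&lnrpc.RPCMiddlewareResponse{
                MiddlewareMessage: &lnrpc.RPCMiddlewareResponse_Register{
                        Register: &lnrpc.MiddlewareRegistration{
                                MiddlewareName: "example-observer",
                                ReadOnlyMode:   true,
                        },
                },
        })
        if err != nil {
                return err
        }

        // Everything received afterwards starts with the reg_complete
        // confirmation, followed by interception requests.
        for {
                req, err := stream.Recv()
                if err != nil {
                        return err
                }
                if err := handle(req); err != nil {
                        return err
                }
        }
}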

// SendCustomMessage sends a custom peer message.
func (r *rpcServer) SendCustomMessage(_ context.Context,
        req *lnrpc.SendCustomMessageRequest) (*lnrpc.SendCustomMessageResponse,
        error) {

        peer, err := route.NewVertexFromBytes(req.Peer)
        if err != nil {
                return nil, err
        }

        err = r.server.SendCustomMessage(
                peer, lnwire.MessageType(req.Type), req.Data,
        )
        switch {
        case errors.Is(err, ErrPeerNotConnected):
                return nil, status.Error(codes.NotFound, err.Error())
        case err != nil:
                return nil, err
        }

        return &lnrpc.SendCustomMessageResponse{
                Status: "message sent successfully",
        }, nil
}
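
// NOTE: Illustrative sketch only, not part of lnd. It sends an arbitrary
// payload to a connected peer via the RPC above. Custom message types
// generally have to be in the custom range (>= 32768) unless lnd is
// configured to allow specific lower types, so the type used here is just an
// example value.
func exampleSendCustomMessage(ctx context.Context,
        client lnrpc.LightningClient, peerPubKey, payload []byte) error {

        _, err := client.SendCustomMessage(ctx, &lnrpc.SendCustomMessageRequest{
                Peer: peerPubKey,
                Type: 32768,
                Data: payload,
        })

        return err
}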

// SubscribeCustomMessages subscribes to a stream of incoming custom peer
// messages.
func (r *rpcServer) SubscribeCustomMessages(
        _ *lnrpc.SubscribeCustomMessagesRequest,
        server lnrpc.Lightning_SubscribeCustomMessagesServer) error {

        client, err := r.server.SubscribeCustomMessages()
        if err != nil {
                return err
        }
        defer client.Cancel()

        for {
                select {
                case <-client.Quit():
                        return errors.New("shutdown")

                case <-server.Context().Done():
                        return server.Context().Err()

                case update := <-client.Updates():
                        customMsg := update.(*CustomMessage)

                        err := server.Send(&lnrpc.CustomMessage{
                                Peer: customMsg.Peer[:],
                                Data: customMsg.Msg.Data,
                                Type: uint32(customMsg.Msg.Type),
                        })
                        if err != nil {
                                return err
                        }
                }
        }
}
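
// NOTE: Illustrative sketch only, not part of lnd. It subscribes to the
// custom message stream served above and prints a short summary of every
// incoming message.
func examplePrintCustomMessages(ctx context.Context,
        client lnrpc.LightningClient) error {

        stream, err := client.SubscribeCustomMessages(
                ctx, &lnrpc.SubscribeCustomMessagesRequest{},
        )
        if err != nil {
                return err
        }

        for {
                msg, err := stream.Recv()
                if err != nil {
                        return err
                }

                fmt.Printf("custom message: peer=%x type=%d len=%d\n",
                        msg.Peer, msg.Type, len(msg.Data))
        }
}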

// ListAliases returns the set of all aliases we have ever allocated along with
// their base SCIDs and possibly a separate confirmed SCID in the case of
// zero-conf.
func (r *rpcServer) ListAliases(_ context.Context,
        _ *lnrpc.ListAliasesRequest) (*lnrpc.ListAliasesResponse, error) {

        // Fetch the map of all aliases.
        mapAliases := r.server.aliasMgr.ListAliases()

        // Fill out the response. This does not include the zero-conf confirmed
        // SCID. Doing so would require more database lookups, and it can be
        // cross-referenced with the output of ListChannels/ClosedChannels.
        resp := &lnrpc.ListAliasesResponse{
                AliasMaps: make([]*lnrpc.AliasMap, 0),
        }

        // Now we need to parse the created mappings into an rpc response.
        resp.AliasMaps = lnrpc.MarshalAliasMap(mapAliases)

        return resp, nil
}

// rpcInitiator returns the correct lnrpc initiator for channels where we have
// a record of the opening channel.
func rpcInitiator(isInitiator bool) lnrpc.Initiator {
        if isInitiator {
                return lnrpc.Initiator_INITIATOR_LOCAL
        }

        return lnrpc.Initiator_INITIATOR_REMOTE
}

// chainSyncInfo wraps info about the best block and whether the system is
// synced to that block.
type chainSyncInfo struct {
        // isSynced specifies whether the whole system is considered synced.
        // When true, it means the following subsystems are at the best height
        // reported by the chain backend,
        // - wallet.
        // - channel graph.
        // - blockbeat dispatcher.
        isSynced bool

        // bestHeight is the current height known to the chain backend.
        bestHeight int32

        // blockHash is the hash of the current block known to the chain
        // backend.
        blockHash chainhash.Hash

        // timestamp is the block's timestamp the wallet has synced to.
        timestamp int64
}

// getChainSyncInfo queries the chain backend, the wallet, the channel router
// and the blockbeat dispatcher to determine the best block and whether the
// system is considered synced.
func (r *rpcServer) getChainSyncInfo() (*chainSyncInfo, error) {
        bestHash, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
        if err != nil {
                return nil, fmt.Errorf("unable to get best block info: %w", err)
        }

        isSynced, bestHeaderTimestamp, err := r.server.cc.Wallet.IsSynced()
        if err != nil {
                return nil, fmt.Errorf("unable to sync PoV of the wallet "+
                        "with current best block in the main chain: %v", err)
        }

        // Create an info to be returned.
        info := &chainSyncInfo{
                isSynced:   isSynced,
                bestHeight: bestHeight,
                blockHash:  *bestHash,
                timestamp:  bestHeaderTimestamp,
        }

        // Exit early if the wallet is not synced.
        if !isSynced {
                return info, nil
        }

        // If the router does full channel validation, it has a lot of work to
        // do for each block. So it might be possible that it isn't yet up to
        // date with the most recent block, even if the wallet is. This can
        // happen in environments with high CPU load (such as parallel itests).
        // Since the `synced_to_chain` flag in the response of this call is used
        // by many wallets (and also our itests) to make sure everything's up to
        // date, we add the router's state to it. So the flag will only toggle
        // to true once the router was also able to catch up.
        if !r.cfg.Routing.AssumeChannelValid {
                routerHeight := r.server.graphBuilder.SyncedHeight()
                isSynced = uint32(bestHeight) == routerHeight
        }

        // Exit early if the channel graph is not synced.
        if !isSynced {
                return info, nil
        }

        // Given the wallet and the channel router are synced, we now check
        // whether the blockbeat dispatcher is synced.
        height := r.server.blockbeatDispatcher.CurrentHeight()

        // Overwrite isSynced and return.
        info.isSynced = height == bestHeight

        return info, nil
}
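
// NOTE: Illustrative sketch only, not part of lnd. The sync state assembled
// by getChainSyncInfo surfaces to clients as the synced_to_chain flag of
// GetInfo, so a simple way to wait for a fully synced node is to poll that
// flag. The one-second poll interval is an arbitrary example value.
func exampleWaitUntilSynced(ctx context.Context,
        client lnrpc.LightningClient) error {

        for {
                info, err := client.GetInfo(ctx, &lnrpc.GetInfoRequest{})
                if err != nil {
                        return err
                }
                if info.SyncedToChain {
                        return nil
                }

                select {
                case <-ctx.Done():
                        return ctx.Err()
                case <-time.After(time.Second):
                }
        }
}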