• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 14350633513

09 Apr 2025 06:37AM UTC coverage: 58.642%. First build
14350633513

Pull #9690

github

web-flow
Merge 9f7b6f71c into ac052988c
Pull Request #9690: autopilot: thread contexts through in preparation for GraphSource methods taking a context

1 of 83 new or added lines in 13 files covered. (1.2%)

97190 of 165734 relevant lines covered (58.64%)

1.82 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

1.03
/discovery/bootstrapper.go
1
package discovery
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/rand"
7
        "crypto/sha256"
8
        "errors"
9
        "fmt"
10
        prand "math/rand"
11
        "net"
12
        "strconv"
13
        "strings"
14
        "time"
15

16
        "github.com/btcsuite/btcd/btcec/v2"
17
        "github.com/btcsuite/btcd/btcutil/bech32"
18
        "github.com/lightningnetwork/lnd/autopilot"
19
        "github.com/lightningnetwork/lnd/lnutils"
20
        "github.com/lightningnetwork/lnd/lnwire"
21
        "github.com/lightningnetwork/lnd/tor"
22
        "github.com/miekg/dns"
23
)
24

25
// init seeds the package-level math/rand PRNG (aliased as prand) so that
// bootstrapper shuffling and sampling differ across process restarts.
// NOTE(review): prand.Seed is deprecated as of Go 1.20 — confirm the
// module's minimum Go version before modernizing.
func init() {
	prand.Seed(time.Now().Unix())
}
3✔
28

29
// NetworkPeerBootstrapper is an interface that represents an initial peer
// bootstrap mechanism. This interface is to be used to bootstrap a new peer to
// the connection by providing it with the pubkey+address of a set of existing
// peers on the network. Several bootstrap mechanisms can be implemented such
// as DNS, in channel graph, DHT's, etc.
type NetworkPeerBootstrapper interface {
	// SampleNodeAddrs uniformly samples a set of specified address from
	// the network peer bootstrapper source. The num addrs field passed in
	// denotes how many valid peer addresses to return. The passed set of
	// node IDs allows the caller to ignore a set of nodes perhaps
	// because they already have connections established.
	SampleNodeAddrs(numAddrs uint32,
		ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, error)

	// Name returns a human readable string which names the concrete
	// implementation of the NetworkPeerBootstrapper.
	Name() string
}
47

48
// MultiSourceBootstrap attempts to utilize a set of NetworkPeerBootstrapper
49
// passed in to return the target (numAddrs) number of peer addresses that can
50
// be used to bootstrap a peer just joining the Lightning Network. Each
51
// bootstrapper will be queried successively until the target amount is met. If
52
// the ignore map is populated, then the bootstrappers will be instructed to
53
// skip those nodes.
54
func MultiSourceBootstrap(ignore map[autopilot.NodeID]struct{}, numAddrs uint32,
55
        bootstrappers ...NetworkPeerBootstrapper) ([]*lnwire.NetAddress, error) {
×
56

×
57
        // We'll randomly shuffle our bootstrappers before querying them in
×
58
        // order to avoid from querying the same bootstrapper method over and
×
59
        // over, as some of these might tend to provide better/worse results
×
60
        // than others.
×
61
        bootstrappers = shuffleBootstrappers(bootstrappers)
×
62

×
63
        var addrs []*lnwire.NetAddress
×
64
        for _, bootstrapper := range bootstrappers {
×
65
                // If we already have enough addresses, then we can exit early
×
66
                // w/o querying the additional bootstrappers.
×
67
                if uint32(len(addrs)) >= numAddrs {
×
68
                        break
×
69
                }
70

71
                log.Infof("Attempting to bootstrap with: %v", bootstrapper.Name())
×
72

×
73
                // If we still need additional addresses, then we'll compute
×
74
                // the number of address remaining that we need to fetch.
×
75
                numAddrsLeft := numAddrs - uint32(len(addrs))
×
76
                log.Tracef("Querying for %v addresses", numAddrsLeft)
×
77
                netAddrs, err := bootstrapper.SampleNodeAddrs(numAddrsLeft, ignore)
×
78
                if err != nil {
×
79
                        // If we encounter an error with a bootstrapper, then
×
80
                        // we'll continue on to the next available
×
81
                        // bootstrapper.
×
82
                        log.Errorf("Unable to query bootstrapper %v: %v",
×
83
                                bootstrapper.Name(), err)
×
84
                        continue
×
85
                }
86

87
                addrs = append(addrs, netAddrs...)
×
88
        }
89

90
        if len(addrs) == 0 {
×
91
                return nil, errors.New("no addresses found")
×
92
        }
×
93

94
        log.Infof("Obtained %v addrs to bootstrap network with", len(addrs))
×
95

×
96
        return addrs, nil
×
97
}
98

99
// shuffleBootstrappers shuffles the set of bootstrappers in order to avoid
100
// querying the same bootstrapper over and over. To shuffle the set of
101
// candidates, we use a version of the Fisher–Yates shuffle algorithm.
102
func shuffleBootstrappers(candidates []NetworkPeerBootstrapper) []NetworkPeerBootstrapper {
×
103
        shuffled := make([]NetworkPeerBootstrapper, len(candidates))
×
104
        perm := prand.Perm(len(candidates))
×
105

×
106
        for i, v := range perm {
×
107
                shuffled[v] = candidates[i]
×
108
        }
×
109

110
        return shuffled
×
111
}
112

113
// ChannelGraphBootstrapper is an implementation of the NetworkPeerBootstrapper
// which attempts to retrieve advertised peers directly from the active channel
// graph. This instance requires a backing autopilot.ChannelGraph instance in
// order to operate properly.
type ChannelGraphBootstrapper struct {
	// chanGraph is the backing channel graph from which candidate node
	// addresses are sampled.
	chanGraph autopilot.ChannelGraph

	// hashAccumulator is a set of 32 random bytes that are read upon the
	// creation of the channel graph bootstrapper. We use this value to
	// randomly select nodes within the known graph to connect to. After
	// each selection, we rotate the accumulator by hashing it with itself.
	hashAccumulator [32]byte

	// tried records nodes that have already been sampled or explicitly
	// ignored, so the same node isn't returned more than once.
	tried map[autopilot.NodeID]struct{}
}
128

129
// A compile time assertion to ensure that ChannelGraphBootstrapper meets the
// NetworkPeerBootstrapper interface.
var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil)
132

133
// NewGraphBootstrapper returns a new instance of a ChannelGraphBootstrapper
134
// backed by an active autopilot.ChannelGraph instance. This type of network
135
// peer bootstrapper will use the authenticated nodes within the known channel
136
// graph to bootstrap connections.
137
func NewGraphBootstrapper(cg autopilot.ChannelGraph) (NetworkPeerBootstrapper, error) {
×
138

×
139
        c := &ChannelGraphBootstrapper{
×
140
                chanGraph: cg,
×
141
                tried:     make(map[autopilot.NodeID]struct{}),
×
142
        }
×
143

×
144
        if _, err := rand.Read(c.hashAccumulator[:]); err != nil {
×
145
                return nil, err
×
146
        }
×
147

148
        return c, nil
×
149
}
150

151
// SampleNodeAddrs uniformly samples a set of specified address from the
// network peer bootstrapper source. The num addrs field passed in denotes how
// many valid peer addresses to return.
//
// NOTE: Part of the NetworkPeerBootstrapper interface.
func (c *ChannelGraphBootstrapper) SampleNodeAddrs(numAddrs uint32,
	ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, error) {

	// TODO(review): placeholder until callers thread a real context
	// through to this method.
	ctx := context.TODO()

	// We'll merge the ignore map with our currently selected map in order
	// to ensure we don't return any duplicate nodes.
	for n := range ignore {
		log.Tracef("Ignored node %x for bootstrapping", n)
		c.tried[n] = struct{}{}
	}

	// In order to bootstrap, we'll iterate all the nodes in the channel
	// graph, accumulating nodes until either we go through all active
	// nodes, or we reach our limit. We ensure that we meet the randomly
	// sample constraint as we maintain an xor accumulator to ensure we
	// randomly sample nodes independent of the iteration of the channel
	// graph.
	sampleAddrs := func() ([]*lnwire.NetAddress, error) {
		var (
			a []*lnwire.NetAddress

			// We'll create a special error so we can return early
			// and abort the transaction once we find a match.
			errFound = fmt.Errorf("found node")
		)

		err := c.chanGraph.ForEachNode(ctx, func(_ context.Context,
			node autopilot.Node) error {

			// Skip any node we've already selected or been told
			// to ignore.
			nID := autopilot.NodeID(node.PubKey())
			if _, ok := c.tried[nID]; ok {
				return nil
			}

			// We'll select the first node we come across whose
			// public key is less than our current accumulator
			// value. When comparing, we skip the first byte as
			// it's 50/50. If it isn't less, then we'll
			// continue forward.
			nodePubKeyBytes := node.PubKey()
			if bytes.Compare(c.hashAccumulator[:], nodePubKeyBytes[1:]) > 0 {
				return nil
			}

			for _, nodeAddr := range node.Addrs() {
				// Only TCP and onion addresses are usable for
				// outbound connections.
				//
				// NOTE(review): this `return nil` abandons the
				// whole node (including addresses already
				// appended to `a`) on the first unsupported
				// address, without marking it tried — confirm
				// this is intended.
				switch nodeAddr.(type) {
				case *net.TCPAddr, *tor.OnionAddr:
				default:
					// If this isn't a valid address
					// supported by the protocol, then we'll
					// skip this node.
					return nil
				}

				nodePub, err := btcec.ParsePubKey(
					nodePubKeyBytes[:],
				)
				if err != nil {
					return err
				}

				// At this point, we've found an eligible node,
				// so we'll return early with our shibboleth
				// error.
				a = append(a, &lnwire.NetAddress{
					IdentityKey: nodePub,
					Address:     nodeAddr,
				})
			}

			c.tried[nID] = struct{}{}

			// Abort the iteration via the sentinel error now that
			// a match has been found.
			return errFound
		})
		if err != nil && err != errFound {
			return nil, err
		}

		return a, nil
	}

	// We'll loop and sample new addresses from the graph source until
	// we've reached our target number of outbound connections or we hit 30
	// attempts, which ever comes first.
	var (
		addrs []*lnwire.NetAddress
		tries uint32
	)
	for tries < 30 && uint32(len(addrs)) < numAddrs {
		sampleAddrs, err := sampleAddrs()
		if err != nil {
			return nil, err
		}

		tries++

		// We'll now rotate our hash accumulator one value forwards.
		c.hashAccumulator = sha256.Sum256(c.hashAccumulator[:])

		// If this attempt didn't yield any addresses, then we'll try
		// again with the rotated accumulator.
		if len(sampleAddrs) == 0 {
			continue
		}

		addrs = append(addrs, sampleAddrs...)
	}

	log.Tracef("Ending hash accumulator state: %x", c.hashAccumulator)

	return addrs, nil
}
272

273
// Name returns a human readable string which names the concrete implementation
274
// of the NetworkPeerBootstrapper.
275
//
276
// NOTE: Part of the NetworkPeerBootstrapper interface.
277
func (c *ChannelGraphBootstrapper) Name() string {
×
278
        return "Authenticated Channel Graph"
×
279
}
×
280

281
// DNSSeedBootstrapper is an implementation of the NetworkPeerBootstrapper
// interface which implements peer bootstrapping via a special DNS seed as
// defined in BOLT-0010. For further details concerning Lightning's current DNS
// bootstrapping protocol, see this link:
//   - https://github.com/lightningnetwork/lightning-rfc/blob/master/10-dns-bootstrap.md
type DNSSeedBootstrapper struct {
	// dnsSeeds is an array of two tuples we'll use for bootstrapping. The
	// first item in the tuple is the primary host we'll use to attempt the
	// SRV lookup we require. If we're unable to receive a response over
	// UDP, then we'll fall back to manual TCP resolution. The second item
	// in the tuple is a special A record that we'll query in order to
	// receive the IP address of the current authoritative DNS server for
	// the network seed.
	dnsSeeds [][2]string

	// net is the network access layer used for the DNS lookups and the
	// fallback TCP dial.
	net      tor.Net

	// timeout is the maximum amount of time a dial will wait for a connect to
	// complete.
	timeout time.Duration
}
301

302
// A compile time assertion to ensure that DNSSeedBootstrapper meets the
303
// NetworkPeerjBootstrapper interface.
304
var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil)
305

306
// NewDNSSeedBootstrapper returns a new instance of the DNSSeedBootstrapper.
307
// The set of passed seeds should point to DNS servers that properly implement
308
// Lightning's DNS peer bootstrapping protocol as defined in BOLT-0010. The set
309
// of passed DNS seeds should come in pairs, with the second host name to be
310
// used as a fallback for manual TCP resolution in the case of an error
311
// receiving the UDP response. The second host should return a single A record
312
// with the IP address of the authoritative name server.
313
func NewDNSSeedBootstrapper(
314
        seeds [][2]string, net tor.Net,
315
        timeout time.Duration) NetworkPeerBootstrapper {
×
316

×
317
        return &DNSSeedBootstrapper{dnsSeeds: seeds, net: net, timeout: timeout}
×
318
}
×
319

320
// fallBackSRVLookup attempts to manually query for SRV records we need to
321
// properly bootstrap. We do this by querying the special record at the "soa."
322
// sub-domain of supporting DNS servers. The returned IP address will be the IP
323
// address of the authoritative DNS server. Once we have this IP address, we'll
324
// connect manually over TCP to request the SRV record. This is necessary as
325
// the records we return are currently too large for a class of resolvers,
326
// causing them to be filtered out. The targetEndPoint is the original end
327
// point that was meant to be hit.
328
func (d *DNSSeedBootstrapper) fallBackSRVLookup(soaShim string,
329
        targetEndPoint string) ([]*net.SRV, error) {
×
330

×
331
        log.Tracef("Attempting to query fallback DNS seed")
×
332

×
333
        // First, we'll lookup the IP address of the server that will act as
×
334
        // our shim.
×
335
        addrs, err := d.net.LookupHost(soaShim)
×
336
        if err != nil {
×
337
                return nil, err
×
338
        }
×
339

340
        // Once we have the IP address, we'll establish a TCP connection using
341
        // port 53.
342
        dnsServer := net.JoinHostPort(addrs[0], "53")
×
343
        conn, err := d.net.Dial("tcp", dnsServer, d.timeout)
×
344
        if err != nil {
×
345
                return nil, err
×
346
        }
×
347

348
        dnsHost := fmt.Sprintf("_nodes._tcp.%v.", targetEndPoint)
×
349
        dnsConn := &dns.Conn{Conn: conn}
×
350
        defer dnsConn.Close()
×
351

×
352
        // With the connection established, we'll craft our SRV query, write
×
353
        // toe request, then wait for the server to give our response.
×
354
        msg := new(dns.Msg)
×
355
        msg.SetQuestion(dnsHost, dns.TypeSRV)
×
356
        if err := dnsConn.WriteMsg(msg); err != nil {
×
357
                return nil, err
×
358
        }
×
359
        resp, err := dnsConn.ReadMsg()
×
360
        if err != nil {
×
361
                return nil, err
×
362
        }
×
363

364
        // If the message response code was not the success code, fail.
365
        if resp.Rcode != dns.RcodeSuccess {
×
366
                return nil, fmt.Errorf("unsuccessful SRV request, "+
×
367
                        "received: %v", resp.Rcode)
×
368
        }
×
369

370
        // Retrieve the RR(s) of the Answer section, and convert to the format
371
        // that net.LookupSRV would normally return.
372
        var rrs []*net.SRV
×
373
        for _, rr := range resp.Answer {
×
374
                srv := rr.(*dns.SRV)
×
375
                rrs = append(rrs, &net.SRV{
×
376
                        Target:   srv.Target,
×
377
                        Port:     srv.Port,
×
378
                        Priority: srv.Priority,
×
379
                        Weight:   srv.Weight,
×
380
                })
×
381
        }
×
382

383
        return rrs, nil
×
384
}
385

386
// SampleNodeAddrs uniformly samples a set of specified address from the
// network peer bootstrapper source. The num addrs field passed in denotes how
// many valid peer addresses to return. The set of DNS seeds are used
// successively to retrieve eligible target nodes.
func (d *DNSSeedBootstrapper) SampleNodeAddrs(numAddrs uint32,
	ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, error) {

	var netAddrs []*lnwire.NetAddress

	// We'll try all the registered DNS seeds, exiting early if one of them
	// gives us all the peers we need.
	//
	// TODO(roasbeef): should combine results from both
search:
	for _, dnsSeedTuple := range d.dnsSeeds {
		// We'll first query the seed with an SRV record so we can
		// obtain a random sample of the encoded public keys of nodes.
		// We use the lndLookupSRV function for this task.
		primarySeed := dnsSeedTuple[0]
		_, addrs, err := d.net.LookupSRV(
			"nodes", "tcp", primarySeed, d.timeout,
		)
		if err != nil {
			log.Tracef("Unable to lookup SRV records via "+
				"primary seed (%v): %v", primarySeed, err)

			log.Trace("Falling back to secondary")

			// If the host of the secondary seed is blank, then
			// we'll bail here as we can't proceed.
			if dnsSeedTuple[1] == "" {
				log.Tracef("DNS seed %v has no secondary, "+
					"skipping fallback", primarySeed)
				continue
			}

			// If we get an error when trying to query via the
			// primary seed, we'll fallback to the secondary seed
			// before concluding failure.
			soaShim := dnsSeedTuple[1]
			addrs, err = d.fallBackSRVLookup(
				soaShim, primarySeed,
			)
			if err != nil {
				log.Tracef("Unable to query fall "+
					"back dns seed (%v): %v", soaShim, err)
				continue
			}

			log.Tracef("Successfully queried fallback DNS seed")
		}

		log.Tracef("Retrieved SRV records from dns seed: %v",
			lnutils.SpewLogClosure(addrs))

		// Next, we'll need to issue an A record request for each of
		// the nodes, skipping it if nothing comes back.
		for _, nodeSrv := range addrs {
			// Stop the entire search once we've collected enough
			// addresses across all seeds.
			if uint32(len(netAddrs)) >= numAddrs {
				break search
			}

			// With the SRV target obtained, we'll now perform
			// another query to obtain the IP address for the
			// matching bech32 encoded node key. We use the
			// lndLookup function for this task.
			//
			// NOTE(review): this `addrs` shadows the outer SRV
			// `addrs` slice for the remainder of the loop body —
			// intentional here, but easy to misread.
			bechNodeHost := nodeSrv.Target
			addrs, err := d.net.LookupHost(bechNodeHost)
			if err != nil {
				return nil, err
			}

			if len(addrs) == 0 {
				log.Tracef("No addresses for %v, skipping",
					bechNodeHost)
				continue
			}

			log.Tracef("Attempting to convert: %v", bechNodeHost)

			// If the host isn't correctly formatted, then we'll
			// skip it.
			if len(bechNodeHost) == 0 ||
				!strings.Contains(bechNodeHost, ".") {

				continue
			}

			// If we have a set of valid addresses, then we'll need
			// to parse the public key from the original bech32
			// encoded string.
			bechNode := strings.Split(bechNodeHost, ".")
			_, nodeBytes5Bits, err := bech32.Decode(bechNode[0])
			if err != nil {
				return nil, err
			}

			// Once we have the bech32 decoded pubkey, we'll need
			// to convert the 5-bit word grouping into our regular
			// 8-bit word grouping so we can convert it into a
			// public key.
			nodeBytes, err := bech32.ConvertBits(
				nodeBytes5Bits, 5, 8, false,
			)
			if err != nil {
				return nil, err
			}
			nodeKey, err := btcec.ParsePubKey(nodeBytes)
			if err != nil {
				return nil, err
			}

			// If we have an ignore list, and this node is in the
			// ignore list, then we'll go to the next candidate.
			if ignore != nil {
				nID := autopilot.NewNodeID(nodeKey)
				if _, ok := ignore[nID]; ok {
					continue
				}
			}

			// Finally we'll convert the host:port peer to a proper
			// TCP address to use within the lnwire.NetAddress. We
			// don't need to use the lndResolveTCP function here
			// because we already have the host:port peer.
			addr := net.JoinHostPort(
				addrs[0],
				strconv.FormatUint(uint64(nodeSrv.Port), 10),
			)
			tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
			if err != nil {
				return nil, err
			}

			// Finally, with all the information parsed, we'll
			// return this fully valid address as a connection
			// attempt.
			lnAddr := &lnwire.NetAddress{
				IdentityKey: nodeKey,
				Address:     tcpAddr,
			}

			log.Tracef("Obtained %v as valid reachable "+
				"node", lnAddr)

			netAddrs = append(netAddrs, lnAddr)
		}
	}

	return netAddrs, nil
}
537

538
// Name returns a human readable string which names the concrete
539
// implementation of the NetworkPeerBootstrapper.
540
func (d *DNSSeedBootstrapper) Name() string {
×
541
        return fmt.Sprintf("BOLT-0010 DNS Seed: %v", d.dnsSeeds)
×
542
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc