lightningnetwork / lnd · build 16990665124
15 Aug 2025 01:10PM UTC coverage: 66.74% (-0.03%) from 66.765%

Pull Request #9455: [1/2] discovery+lnwire: add support for DNS host name in NodeAnnouncement msg
Merge 035fac41d into fb1adfc21

116 of 188 new or added lines in 8 files covered (61.7%).
110 existing lines in 23 files now uncovered.
136011 of 203791 relevant lines covered (66.74%).
21482.89 hits per line.

Source File: /discovery/bootstrapper.go (file coverage: 41.79%)

package discovery

import (
        "bytes"
        "context"
        "crypto/rand"
        "crypto/sha256"
        "errors"
        "fmt"
        prand "math/rand"
        "net"
        "strconv"
        "strings"
        "time"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/btcutil/bech32"
        "github.com/lightningnetwork/lnd/autopilot"
        "github.com/lightningnetwork/lnd/lnutils"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
        "github.com/lightningnetwork/lnd/tor"
        "github.com/miekg/dns"
)

func init() {
        prand.Seed(time.Now().Unix())
}

// NetworkPeerBootstrapper is an interface that represents an initial peer
// bootstrap mechanism. It is used to bootstrap a new peer onto the network by
// providing it with the pubkey+address of a set of existing peers. Several
// bootstrap mechanisms can be implemented, such as DNS, the channel graph,
// DHTs, etc.
type NetworkPeerBootstrapper interface {
        // SampleNodeAddrs uniformly samples a set of addresses from the
        // network peer bootstrapper source. The numAddrs field passed in
        // denotes how many valid peer addresses to return. The passed ignore
        // map allows the caller to skip a set of nodes, perhaps because
        // connections to them are already established.
        SampleNodeAddrs(ctx context.Context, numAddrs uint32,
                ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress,
                error)

        // Name returns a human-readable string which names the concrete
        // implementation of the NetworkPeerBootstrapper.
        Name() string
}

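// Example (editor's sketch, not part of the original file): any type that can
// produce (pubkey, address) pairs can act as a bootstrapper. The hypothetical
// staticBootstrapper below serves only to illustrate the interface contract.
//
//      type staticBootstrapper struct {
//              peers []*lnwire.NetAddress
//      }
//
//      func (s *staticBootstrapper) SampleNodeAddrs(_ context.Context,
//              numAddrs uint32,
//              ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress,
//              error) {
//
//              var out []*lnwire.NetAddress
//              for _, p := range s.peers {
//                      if uint32(len(out)) >= numAddrs {
//                              break
//                      }
//                      nID := autopilot.NewNodeID(p.IdentityKey)
//                      if _, ok := ignore[nID]; ok {
//                              continue
//                      }
//                      out = append(out, p)
//              }
//
//              return out, nil
//      }
//
//      func (s *staticBootstrapper) Name() string { return "static peer list" }
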
// MultiSourceBootstrap attempts to utilize a set of NetworkPeerBootstrapper
// instances passed in to return the target (numAddrs) number of peer
// addresses that can be used to bootstrap a peer just joining the Lightning
// Network. Each bootstrapper will be queried successively until the target
// amount is met. If the ignore map is populated, then the bootstrappers will
// be instructed to skip those nodes.
func MultiSourceBootstrap(ctx context.Context,
        ignore map[autopilot.NodeID]struct{}, numAddrs uint32,
        bootstrappers ...NetworkPeerBootstrapper) ([]*lnwire.NetAddress, error) {

        // We'll randomly shuffle our bootstrappers before querying them in
        // order to avoid querying the same bootstrapper method over and over,
        // as some of these might tend to provide better/worse results than
        // others.
        bootstrappers = shuffleBootstrappers(bootstrappers)

        var addrs []*lnwire.NetAddress
        for _, bootstrapper := range bootstrappers {
                // If we already have enough addresses, then we can exit early
                // w/o querying the additional bootstrappers.
                if uint32(len(addrs)) >= numAddrs {
                        break
                }

                log.Infof("Attempting to bootstrap with: %v", bootstrapper.Name())

                // If we still need additional addresses, then we'll compute
                // the number of addresses remaining that we need to fetch.
                numAddrsLeft := numAddrs - uint32(len(addrs))
                log.Tracef("Querying for %v addresses", numAddrsLeft)
                netAddrs, err := bootstrapper.SampleNodeAddrs(
                        ctx, numAddrsLeft, ignore,
                )
                if err != nil {
                        // If we encounter an error with a bootstrapper, then
                        // we'll continue on to the next available
                        // bootstrapper.
                        log.Errorf("Unable to query bootstrapper %v: %v",
                                bootstrapper.Name(), err)
                        continue
                }

                addrs = append(addrs, netAddrs...)
        }

        if len(addrs) == 0 {
                return nil, errors.New("no addresses found")
        }

        log.Infof("Obtained %v addrs to bootstrap network with", len(addrs))

        return addrs, nil
}

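// Example (editor's sketch, not part of the original file): combining the two
// concrete bootstrappers defined in this file. The variables cg, seeds,
// netCfg and timeout are hypothetical placeholders.
//
//      graphBoot, err := NewGraphBootstrapper(cg, false)
//      if err != nil {
//              return err
//      }
//      dnsBoot := NewDNSSeedBootstrapper(seeds, netCfg, timeout)
//
//      addrs, err := MultiSourceBootstrap(
//              context.Background(), nil, 10, graphBoot, dnsBoot,
//      )
//      if err != nil {
//              return err
//      }
//      for _, a := range addrs {
//              log.Infof("Bootstrap candidate: %v", a)
//      }
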
// shuffleBootstrappers shuffles the set of bootstrappers in order to avoid
// querying the same bootstrapper over and over. To shuffle the set of
// candidates, we use a version of the Fisher–Yates shuffle algorithm.
func shuffleBootstrappers(
        candidates []NetworkPeerBootstrapper) []NetworkPeerBootstrapper {

        shuffled := make([]NetworkPeerBootstrapper, len(candidates))
        perm := prand.Perm(len(candidates))

        for i, v := range perm {
                shuffled[v] = candidates[i]
        }

        return shuffled
}

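// Example (editor's sketch, not part of the original file): how the
// permutation drives the shuffle. If prand.Perm(3) returns [2 0 1], then
// candidates[0] lands at index 2, candidates[1] at index 0, and candidates[2]
// at index 1:
//
//      perm := []int{2, 0, 1}
//      candidates := []string{"a", "b", "c"}
//      shuffled := make([]string, 3)
//      for i, v := range perm {
//              shuffled[v] = candidates[i]
//      }
//      fmt.Println(shuffled) // [b c a]
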
// ChannelGraphBootstrapper is an implementation of the NetworkPeerBootstrapper
// which attempts to retrieve advertised peers directly from the active channel
// graph. This instance requires a backing autopilot.ChannelGraph instance in
// order to operate properly.
type ChannelGraphBootstrapper struct {
        chanGraph autopilot.ChannelGraph

        // hashAccumulator is used to determine which nodes to use for
        // bootstrapping. It allows us to potentially introduce some randomness
        // into the selection process.
        hashAccumulator hashAccumulator

        tried map[autopilot.NodeID]struct{}
}

// A compile-time assertion to ensure that ChannelGraphBootstrapper meets the
// NetworkPeerBootstrapper interface.
var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil)

// NewGraphBootstrapper returns a new instance of a ChannelGraphBootstrapper
// backed by an active autopilot.ChannelGraph instance. This type of network
// peer bootstrapper will use the authenticated nodes within the known channel
// graph to bootstrap connections.
func NewGraphBootstrapper(cg autopilot.ChannelGraph,
        deterministicSampling bool) (NetworkPeerBootstrapper, error) {

        var (
                hashAccumulator hashAccumulator
                err             error
        )
        if deterministicSampling {
                // If we're using deterministic sampling, then we'll use a
                // no-op hash accumulator that will always return false for
                // skipNode.
                hashAccumulator = newNoOpHashAccumulator()
        } else {
                // Otherwise, we'll use a random hash accumulator to sample
                // nodes from the channel graph.
                hashAccumulator, err = newRandomHashAccumulator()
                if err != nil {
                        return nil, fmt.Errorf("unable to create hash "+
                                "accumulator: %w", err)
                }
        }

        return &ChannelGraphBootstrapper{
                chanGraph:       cg,
                tried:           make(map[autopilot.NodeID]struct{}),
                hashAccumulator: hashAccumulator,
        }, nil
}

// SampleNodeAddrs uniformly samples a set of addresses from the network peer
// bootstrapper source. The numAddrs field passed in denotes how many valid
// peer addresses to return.
//
// NOTE: Part of the NetworkPeerBootstrapper interface.
func (c *ChannelGraphBootstrapper) SampleNodeAddrs(_ context.Context,
        numAddrs uint32,
        ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, error) {

        ctx := context.TODO()

        // We'll merge the ignore map with our currently selected map in order
        // to ensure we don't return any duplicate nodes.
        for n := range ignore {
                log.Tracef("Ignored node %x for bootstrapping", n)
                c.tried[n] = struct{}{}
        }

        // In order to bootstrap, we'll iterate over all the nodes in the
        // channel graph, accumulating nodes until either we go through all
        // active nodes, or we reach our limit. We meet the random sampling
        // constraint by maintaining a hash accumulator, which makes the nodes
        // we select independent of the iteration order of the channel graph.
        sampleAddrs := func() ([]*lnwire.NetAddress, error) {
                var (
                        a []*lnwire.NetAddress

                        // We'll create a special error so we can return early
                        // and abort the transaction once we find a match.
                        errFound = fmt.Errorf("found node")
                )

                err := c.chanGraph.ForEachNode(ctx, func(_ context.Context,
                        node autopilot.Node) error {

                        nID := autopilot.NodeID(node.PubKey())
                        if _, ok := c.tried[nID]; ok {
                                return nil
                        }

                        // We'll select the first node we come across whose
                        // public key is not less than our current accumulator
                        // value. When comparing, we skip the first byte as
                        // it's 50/50 between 02 and 03. If the key compares
                        // as less, we'll skip this node and continue forward.
                        nodePubKeyBytes := node.PubKey()
                        if c.hashAccumulator.skipNode(nodePubKeyBytes) {
                                return nil
                        }

                        for _, nodeAddr := range node.Addrs() {
                                // If we haven't yet reached our limit, then
                                // we'll copy over the details of this node
                                // into the set of addresses to be returned.
                                switch nodeAddr.(type) {
                                case *net.TCPAddr, *tor.OnionAddr:
                                default:
                                        // If this isn't a valid address
                                        // supported by the protocol, then we'll
                                        // skip this node.
                                        return nil
                                }

                                nodePub, err := btcec.ParsePubKey(
                                        nodePubKeyBytes[:],
                                )
                                if err != nil {
                                        return err
                                }

                                // At this point, we've found an eligible node,
                                // so we'll collect its addresses and then
                                // return early with our sentinel error.
                                a = append(a, &lnwire.NetAddress{
                                        IdentityKey: nodePub,
                                        Address:     nodeAddr,
                                })
                        }

                        return errFound
                }, func() {
                        a = nil
                })
                if err != nil && !errors.Is(err, errFound) {
                        return nil, err
                }

                return a, nil
        }

        // We'll loop and sample new addresses from the graph source until
        // we've reached our target number of outbound connections or we hit 30
        // attempts, whichever comes first.
        var (
                addrs []*lnwire.NetAddress
                tries uint32
        )
        for tries < 30 && uint32(len(addrs)) < numAddrs {
                sampleAddrs, err := sampleAddrs()
                if err != nil {
                        return nil, err
                }

                tries++

                // We'll now rotate our hash accumulator one value forwards.
                c.hashAccumulator.rotate()

                // If this attempt didn't yield any addresses, then we'll move
                // on to the next attempt.
                if len(sampleAddrs) == 0 {
                        continue
                }

                for _, addr := range sampleAddrs {
                        nID := autopilot.NodeID(
                                addr.IdentityKey.SerializeCompressed(),
                        )

                        c.tried[nID] = struct{}{}
                }

                addrs = append(addrs, sampleAddrs...)
        }

        log.Tracef("Ending hash accumulator state: %x", c.hashAccumulator)

        return addrs, nil
}

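// Example (editor's sketch, not part of the original file): the sentinel
// error pattern used by sampleAddrs above, in isolation. Returning a known
// error from an iteration callback aborts the walk early, and errors.Is lets
// the caller treat that specific error as success. The walk function here is
// a hypothetical stand-in for ForEachNode.
//
//      var errFound = errors.New("found node")
//
//      err := walk(func(item int) error {
//              if item == target {
//                      return errFound // Stop iterating early.
//              }
//              return nil
//      })
//      if err != nil && !errors.Is(err, errFound) {
//              return err // A real failure.
//      }
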
// Name returns a human-readable string which names the concrete
// implementation of the NetworkPeerBootstrapper.
//
// NOTE: Part of the NetworkPeerBootstrapper interface.
func (c *ChannelGraphBootstrapper) Name() string {
        return "Authenticated Channel Graph"
}

// DNSSeedBootstrapper is an implementation of the NetworkPeerBootstrapper
// interface which implements peer bootstrapping via a special DNS seed as
// defined in BOLT-0010. For further details concerning Lightning's current DNS
// bootstrapping protocol, see this link:
//   - https://github.com/lightningnetwork/lightning-rfc/blob/master/10-dns-bootstrap.md
type DNSSeedBootstrapper struct {
        // dnsSeeds is an array of two-tuples we'll use for bootstrapping. The
        // first item in the tuple is the primary host we'll use to attempt the
        // SRV lookup we require. If we're unable to receive a response over
        // UDP, then we'll fall back to manual TCP resolution. The second item
        // in the tuple is a special A record that we'll query in order to
        // receive the IP address of the current authoritative DNS server for
        // the network seed.
        dnsSeeds [][2]string
        net      tor.Net

        // timeout is the maximum amount of time a dial will wait for a
        // connect to complete.
        timeout time.Duration
}

// A compile-time assertion to ensure that DNSSeedBootstrapper meets the
// NetworkPeerBootstrapper interface.
var _ NetworkPeerBootstrapper = (*DNSSeedBootstrapper)(nil)

// NewDNSSeedBootstrapper returns a new instance of the DNSSeedBootstrapper.
// The set of passed seeds should point to DNS servers that properly implement
// Lightning's DNS peer bootstrapping protocol as defined in BOLT-0010. The set
// of passed DNS seeds should come in pairs, with the second host name to be
// used as a fallback for manual TCP resolution in the case of an error
// receiving the UDP response. The second host should return a single A record
// with the IP address of the authoritative name server.
func NewDNSSeedBootstrapper(seeds [][2]string, net tor.Net,
        timeout time.Duration) NetworkPeerBootstrapper {

        return &DNSSeedBootstrapper{dnsSeeds: seeds, net: net, timeout: timeout}
}

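// Example (editor's sketch, not part of the original file): constructing a
// DNS seed bootstrapper. The seed pairs follow the layout described above
// (primary SRV host, fallback SOA shim); the host names mirror the mainnet
// defaults shipped with lnd at the time of writing and should be treated as
// illustrative rather than authoritative.
//
//      seeds := [][2]string{
//              {"lseed.bitcoinstats.com", ""},
//              {"nodes.lightning.directory", "soa.nodes.lightning.directory"},
//      }
//      boot := NewDNSSeedBootstrapper(seeds, &tor.ClearNet{}, 10*time.Second)
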
// fallBackSRVLookup attempts to manually query for the SRV records we need to
// properly bootstrap. We do this by querying the special record at the "soa."
// sub-domain of supporting DNS servers. The returned IP address will be the IP
// address of the authoritative DNS server. Once we have this IP address, we'll
// connect manually over TCP to request the SRV record. This is necessary as
// the records we return are currently too large for a class of resolvers,
// causing them to be filtered out. The targetEndPoint is the original end
// point that was meant to be hit.
func (d *DNSSeedBootstrapper) fallBackSRVLookup(soaShim string,
        targetEndPoint string) ([]*net.SRV, error) {

        log.Tracef("Attempting to query fallback DNS seed")

        // First, we'll lookup the IP address of the server that will act as
        // our shim.
        addrs, err := d.net.LookupHost(soaShim)
        if err != nil {
                return nil, err
        }

        // Once we have the IP address, we'll establish a TCP connection using
        // port 53.
        dnsServer := net.JoinHostPort(addrs[0], "53")
        conn, err := d.net.Dial("tcp", dnsServer, d.timeout)
        if err != nil {
                return nil, err
        }

        dnsHost := fmt.Sprintf("_nodes._tcp.%v.", targetEndPoint)
        dnsConn := &dns.Conn{Conn: conn}
        defer dnsConn.Close()

        // With the connection established, we'll craft our SRV query, write
        // the request, then wait for the server to give us our response.
        msg := new(dns.Msg)
        msg.SetQuestion(dnsHost, dns.TypeSRV)
        if err := dnsConn.WriteMsg(msg); err != nil {
                return nil, err
        }
        resp, err := dnsConn.ReadMsg()
        if err != nil {
                return nil, err
        }

        // If the message response code was not the success code, fail.
        if resp.Rcode != dns.RcodeSuccess {
                return nil, fmt.Errorf("unsuccessful SRV request, "+
                        "received: %v", resp.Rcode)
        }

        // Retrieve the RR(s) of the Answer section, and convert them to the
        // format that net.LookupSRV would normally return.
        var rrs []*net.SRV
        for _, rr := range resp.Answer {
                srv := rr.(*dns.SRV)
                rrs = append(rrs, &net.SRV{
                        Target:   srv.Target,
                        Port:     srv.Port,
                        Priority: srv.Priority,
                        Weight:   srv.Weight,
                })
        }

        return rrs, nil
}

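// Example (editor's sketch, not part of the original file): the primary,
// non-fallback path that fallBackSRVLookup backs up is a plain SRV lookup of
// the _nodes._tcp service. With the standard library alone it would look
// roughly like this (d.net.LookupSRV performs the same query through lnd's
// Tor-aware resolver; the seed host is a placeholder):
//
//      _, srvs, err := net.LookupSRV("nodes", "tcp", "nodes.lightning.directory")
//      if err != nil {
//              return err
//      }
//      for _, srv := range srvs {
//              fmt.Printf("%v:%v\n", srv.Target, srv.Port)
//      }
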
// SampleNodeAddrs uniformly samples a set of addresses from the network peer
// bootstrapper source. The numAddrs field passed in denotes how many valid
// peer addresses to return. The set of DNS seeds are used successively to
// retrieve eligible target nodes.
func (d *DNSSeedBootstrapper) SampleNodeAddrs(_ context.Context,
        numAddrs uint32,
        ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, error) {

        var netAddrs []*lnwire.NetAddress

        // We'll try all the registered DNS seeds, exiting early if one of them
        // gives us all the peers we need.
        //
        // TODO(roasbeef): should combine results from both
search:
        for _, dnsSeedTuple := range d.dnsSeeds {
                // We'll first query the seed with an SRV record so we can
                // obtain a random sample of the encoded public keys of nodes.
                primarySeed := dnsSeedTuple[0]
                _, addrs, err := d.net.LookupSRV(
                        "nodes", "tcp", primarySeed, d.timeout,
                )
                if err != nil {
                        log.Tracef("Unable to lookup SRV records via "+
                                "primary seed (%v): %v", primarySeed, err)

                        log.Trace("Falling back to secondary")

                        // If the host of the secondary seed is blank, then
                        // we'll bail here as we can't proceed.
                        if dnsSeedTuple[1] == "" {
                                log.Tracef("DNS seed %v has no secondary, "+
                                        "skipping fallback", primarySeed)
                                continue
                        }

                        // If we get an error when trying to query via the
                        // primary seed, we'll fall back to the secondary seed
                        // before concluding failure.
                        soaShim := dnsSeedTuple[1]
                        addrs, err = d.fallBackSRVLookup(
                                soaShim, primarySeed,
                        )
                        if err != nil {
                                log.Tracef("Unable to query fall "+
                                        "back dns seed (%v): %v", soaShim, err)
                                continue
                        }

                        log.Tracef("Successfully queried fallback DNS seed")
                }

                log.Tracef("Retrieved SRV records from dns seed: %v",
                        lnutils.SpewLogClosure(addrs))

                // Next, we'll need to issue an A record request for each of
                // the nodes, skipping it if nothing comes back.
                for _, nodeSrv := range addrs {
                        if uint32(len(netAddrs)) >= numAddrs {
                                break search
                        }

                        // With the SRV target obtained, we'll now perform
                        // another query to obtain the IP address for the
                        // matching bech32-encoded node key.
                        bechNodeHost := nodeSrv.Target
                        addrs, err := d.net.LookupHost(bechNodeHost)
                        if err != nil {
                                return nil, err
                        }

                        if len(addrs) == 0 {
                                log.Tracef("No addresses for %v, skipping",
                                        bechNodeHost)
                                continue
                        }

                        log.Tracef("Attempting to convert: %v", bechNodeHost)

                        // If the host isn't correctly formatted, then we'll
                        // skip it.
                        if len(bechNodeHost) == 0 ||
                                !strings.Contains(bechNodeHost, ".") {

                                continue
                        }

                        // If we have a set of valid addresses, then we'll need
                        // to parse the public key from the original bech32
                        // encoded string.
                        bechNode := strings.Split(bechNodeHost, ".")
                        _, nodeBytes5Bits, err := bech32.Decode(bechNode[0])
                        if err != nil {
                                return nil, err
                        }

                        // Once we have the bech32-decoded pubkey, we'll need
                        // to convert the 5-bit word grouping into our regular
                        // 8-bit word grouping so we can convert it into a
                        // public key.
                        nodeBytes, err := bech32.ConvertBits(
                                nodeBytes5Bits, 5, 8, false,
                        )
                        if err != nil {
                                return nil, err
                        }
                        nodeKey, err := btcec.ParsePubKey(nodeBytes)
                        if err != nil {
                                return nil, err
                        }

                        // If we have an ignore list, and this node is in the
                        // ignore list, then we'll go to the next candidate.
                        if ignore != nil {
                                nID := autopilot.NewNodeID(nodeKey)
                                if _, ok := ignore[nID]; ok {
                                        continue
                                }
                        }

                        // Finally we'll convert the host:port peer to a proper
                        // TCP address to use within the lnwire.NetAddress. We
                        // don't need a custom resolver here because we already
                        // have the resolved host:port peer.
                        addr := net.JoinHostPort(
                                addrs[0],
                                strconv.FormatUint(uint64(nodeSrv.Port), 10),
                        )
                        tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
                        if err != nil {
                                return nil, err
                        }

                        // Finally, with all the information parsed, we'll
                        // return this fully valid address as a connection
                        // attempt.
                        lnAddr := &lnwire.NetAddress{
                                IdentityKey: nodeKey,
                                Address:     tcpAddr,
                        }

                        log.Tracef("Obtained %v as valid reachable "+
                                "node", lnAddr)

                        netAddrs = append(netAddrs, lnAddr)
                }
        }

        return netAddrs, nil
}

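// Example (editor's sketch, not part of the original file): the node-key
// portion of a BOLT-10 SRV target is a bech32 string whose 5-bit words must
// be regrouped into 8-bit bytes before the key can be parsed, exactly as done
// above. The host name below is a made-up placeholder, not a real seed entry.
//
//      bechNodeHost := "<bech32-node-id>.nodes.lightning.directory"
//      _, words, err := bech32.Decode(strings.Split(bechNodeHost, ".")[0])
//      if err != nil {
//              return err
//      }
//      keyBytes, err := bech32.ConvertBits(words, 5, 8, false)
//      if err != nil {
//              return err
//      }
//      nodeKey, err := btcec.ParsePubKey(keyBytes) // 33-byte compressed key
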
// Name returns a human-readable string which names the concrete
// implementation of the NetworkPeerBootstrapper.
func (d *DNSSeedBootstrapper) Name() string {
        return fmt.Sprintf("BOLT-0010 DNS Seed: %v", d.dnsSeeds)
}

// hashAccumulator is an interface that defines the methods required for
// a hash accumulator used to sample nodes from the channel graph.
type hashAccumulator interface {
        // rotate rotates the hash accumulator value.
        rotate()

        // skipNode returns true if the node with the given public key
        // should be skipped based on the current hash accumulator state.
        skipNode(pubKey route.Vertex) bool
}

// randomHashAccumulator is an implementation of the hashAccumulator
// interface that uses a random hash to sample nodes from the channel graph.
type randomHashAccumulator struct {
        hash [32]byte
}

// A compile-time assertion to ensure that randomHashAccumulator meets the
// hashAccumulator interface.
var _ hashAccumulator = (*randomHashAccumulator)(nil)

// newRandomHashAccumulator returns a new instance of a randomHashAccumulator.
// This accumulator is used to randomly sample nodes from the channel graph.
func newRandomHashAccumulator() (*randomHashAccumulator, error) {
        var r randomHashAccumulator

        if _, err := rand.Read(r.hash[:]); err != nil {
                return nil, fmt.Errorf("unable to read random bytes: %w", err)
        }

        return &r, nil
}

// rotate rotates the hash accumulator by hashing the current value
// with itself. This ensures that we have a new random value to compare
// against when we sample nodes from the channel graph.
//
// NOTE: this is part of the hashAccumulator interface.
func (r *randomHashAccumulator) rotate() {
        r.hash = sha256.Sum256(r.hash[:])
}

// skipNode returns true if the node with the given public key should be
// skipped based on the current hash accumulator state. It does so by
// comparing the current accumulator value with the passed byte slice: the
// node is skipped if its public key is lexicographically less than the
// accumulator value. When comparing, we skip the first byte of the public key
// as it's 50/50 between 02 and 03 for compressed pub keys.
//
// NOTE: this is part of the hashAccumulator interface.
func (r *randomHashAccumulator) skipNode(pub route.Vertex) bool {
        return bytes.Compare(r.hash[:], pub[1:]) > 0
}

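// Example (editor's sketch, not part of the original file): a quick check of
// the skip rule. With an accumulator value starting with byte 0x80, keys
// whose bytes after the leading 02/03 compare below the accumulator are
// skipped, while the rest are kept; each rotate() re-draws that threshold.
//
//      var r randomHashAccumulator
//      r.hash[0] = 0x80
//
//      var low, high route.Vertex
//      low[1] = 0x00  // pub[1:] starts below 0x80.
//      high[1] = 0xff // pub[1:] starts above 0x80.
//
//      fmt.Println(r.skipNode(low))  // true: skipped
//      fmt.Println(r.skipNode(high)) // false: kept
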
// noOpHashAccumulator is a no-op implementation of the hashAccumulator
// interface. This is used when we want deterministic behavior and don't
// want to sample nodes randomly from the channel graph.
type noOpHashAccumulator struct{}

// newNoOpHashAccumulator returns a new instance of a noOpHashAccumulator.
func newNoOpHashAccumulator() *noOpHashAccumulator {
        return &noOpHashAccumulator{}
}

// rotate is a no-op for the noOpHashAccumulator.
//
// NOTE: this is part of the hashAccumulator interface.
func (*noOpHashAccumulator) rotate() {}

// skipNode always returns false, meaning that no nodes will be skipped.
//
// NOTE: this is part of the hashAccumulator interface.
func (*noOpHashAccumulator) skipNode(route.Vertex) bool {
        return false
}

// A compile-time assertion to ensure that noOpHashAccumulator meets the
// hashAccumulator interface.
var _ hashAccumulator = (*noOpHashAccumulator)(nil)