
lightningnetwork / lnd / 16177692143

09 Jul 2025 06:49PM UTC coverage: 55.317% (-2.3%) from 57.611%

Pull Request #10060: sweep: fix expected spending events being missed
Merge 4aec413e3 into 0e830da9d

9 of 25 new or added lines in 1 file covered (36.0%)
23713 existing lines in 281 files now uncovered
108499 of 196142 relevant lines covered (55.32%)
22331.52 hits per line
Source File: /discovery/bootstrapper.go
package discovery

import (
        "bytes"
        "context"
        "crypto/rand"
        "crypto/sha256"
        "errors"
        "fmt"
        prand "math/rand"
        "net"
        "strconv"
        "strings"
        "time"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/btcutil/bech32"
        "github.com/lightningnetwork/lnd/autopilot"
        "github.com/lightningnetwork/lnd/lnutils"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
        "github.com/lightningnetwork/lnd/tor"
        "github.com/miekg/dns"
)

func init() {
        prand.Seed(time.Now().Unix())
}

// NetworkPeerBootstrapper is an interface that represents an initial peer
// bootstrap mechanism. It is used to bootstrap a new node into the network by
// providing it with the pubkey+address of a set of existing peers on the
// network. Several bootstrap mechanisms can be implemented, such as DNS, the
// channel graph, DHTs, etc.
type NetworkPeerBootstrapper interface {
        // SampleNodeAddrs uniformly samples a set of addresses from the
        // network peer bootstrapper source. The numAddrs field passed in
        // denotes how many valid peer addresses to return. The passed ignore
        // map allows the caller to skip a set of nodes, perhaps because
        // connections to them are already established.
        SampleNodeAddrs(ctx context.Context, numAddrs uint32,
                ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress,
                error)

        // Name returns a human readable string which names the concrete
        // implementation of the NetworkPeerBootstrapper.
        Name() string
}

// MultiSourceBootstrap attempts to utilize a set of NetworkPeerBootstrapper
// instances passed in to return the target (numAddrs) number of peer
// addresses that can be used to bootstrap a peer just joining the Lightning
// Network. Each bootstrapper will be queried successively until the target
// amount is met. If the ignore map is populated, then the bootstrappers will
// be instructed to skip those nodes.
func MultiSourceBootstrap(ctx context.Context,
        ignore map[autopilot.NodeID]struct{}, numAddrs uint32,
        bootstrappers ...NetworkPeerBootstrapper) ([]*lnwire.NetAddress, error) {

        // We'll randomly shuffle our bootstrappers before querying them in
        // order to avoid querying the same bootstrapper method over and over,
        // as some of these might tend to provide better/worse results than
        // others.
        bootstrappers = shuffleBootstrappers(bootstrappers)

        var addrs []*lnwire.NetAddress
        for _, bootstrapper := range bootstrappers {
                // If we already have enough addresses, then we can exit early
                // w/o querying the additional bootstrappers.
                if uint32(len(addrs)) >= numAddrs {
                        break
                }

                log.Infof("Attempting to bootstrap with: %v", bootstrapper.Name())

                // If we still need additional addresses, then we'll compute
                // the number of addresses remaining that we need to fetch.
                numAddrsLeft := numAddrs - uint32(len(addrs))
                log.Tracef("Querying for %v addresses", numAddrsLeft)
                netAddrs, err := bootstrapper.SampleNodeAddrs(
                        ctx, numAddrsLeft, ignore,
                )
                if err != nil {
                        // If we encounter an error with a bootstrapper, then
                        // we'll continue on to the next available
                        // bootstrapper.
                        log.Errorf("Unable to query bootstrapper %v: %v",
                                bootstrapper.Name(), err)
                        continue
                }

                addrs = append(addrs, netAddrs...)
        }

        if len(addrs) == 0 {
                return nil, errors.New("no addresses found")
        }

        log.Infof("Obtained %v addrs to bootstrap network with", len(addrs))

        return addrs, nil
}
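
// As a minimal sketch of how the pieces above could be wired together
// (assuming the caller already holds an autopilot.ChannelGraph and a tor.Net
// implementation; the seed host names below are purely illustrative):
//
//	func bootstrapPeers(ctx context.Context, cg autopilot.ChannelGraph,
//		netImpl tor.Net) ([]*lnwire.NetAddress, error) {
//
//		graphSrc, err := NewGraphBootstrapper(cg, false)
//		if err != nil {
//			return nil, err
//		}
//
//		dnsSrc := NewDNSSeedBootstrapper(
//			[][2]string{{"seed.example.org", "soa.seed.example.org"}},
//			netImpl, 10*time.Second,
//		)
//
//		// Ask for 8 addresses, ignoring nodes we already track.
//		ignore := make(map[autopilot.NodeID]struct{})
//		return MultiSourceBootstrap(ctx, ignore, 8, graphSrc, dnsSrc)
//	}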

// shuffleBootstrappers shuffles the set of bootstrappers in order to avoid
// querying the same bootstrapper over and over. To shuffle the set of
// candidates, we use a version of the Fisher–Yates shuffle algorithm.
func shuffleBootstrappers(candidates []NetworkPeerBootstrapper) []NetworkPeerBootstrapper {
        shuffled := make([]NetworkPeerBootstrapper, len(candidates))
        perm := prand.Perm(len(candidates))

        for i, v := range perm {
                shuffled[v] = candidates[i]
        }

        return shuffled
}
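
// Note that prand.Perm already yields a uniformly random permutation (it is
// itself built on a Fisher–Yates style swap), so an equivalent, more direct
// variant would be the following sketch, assuming the caller is fine with the
// slice being shuffled in place rather than copied:
//
//	prand.Shuffle(len(candidates), func(i, j int) {
//		candidates[i], candidates[j] = candidates[j], candidates[i]
//	})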

// ChannelGraphBootstrapper is an implementation of the NetworkPeerBootstrapper
// which attempts to retrieve advertised peers directly from the active channel
// graph. This instance requires a backing autopilot.ChannelGraph instance in
// order to operate properly.
type ChannelGraphBootstrapper struct {
        chanGraph autopilot.ChannelGraph

        // hashAccumulator is used to determine which nodes to use for
        // bootstrapping. It allows us to potentially introduce some randomness
        // into the selection process.
        hashAccumulator hashAccumulator

        tried map[autopilot.NodeID]struct{}
}

// A compile time assertion to ensure that ChannelGraphBootstrapper meets the
// NetworkPeerBootstrapper interface.
var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil)

// NewGraphBootstrapper returns a new instance of a ChannelGraphBootstrapper
// backed by an active autopilot.ChannelGraph instance. This type of network
// peer bootstrapper will use the authenticated nodes within the known channel
// graph to bootstrap connections.
func NewGraphBootstrapper(cg autopilot.ChannelGraph,
        deterministicSampling bool) (NetworkPeerBootstrapper, error) {

        var (
                hashAccumulator hashAccumulator
                err             error
        )
        if deterministicSampling {
                // If we're using deterministic sampling, then we'll use a
                // no-op hash accumulator that will always return false for
                // skipNode.
                hashAccumulator = newNoOpHashAccumulator()
        } else {
                // Otherwise, we'll use a random hash accumulator to sample
                // nodes from the channel graph.
                hashAccumulator, err = newRandomHashAccumulator()
                if err != nil {
                        return nil, fmt.Errorf("unable to create hash "+
                                "accumulator: %w", err)
                }
        }

        return &ChannelGraphBootstrapper{
                chanGraph:       cg,
                tried:           make(map[autopilot.NodeID]struct{}),
                hashAccumulator: hashAccumulator,
        }, nil
}
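
// A brief usage sketch of the two sampling modes, assuming (as the comments
// above suggest) that deterministic sampling is intended for reproducible
// runs such as tests:
//
//	// Reproducible iteration order, no random filtering of nodes.
//	testSrc, err := NewGraphBootstrapper(cg, true)
//
//	// Production-style randomized sampling.
//	liveSrc, err := NewGraphBootstrapper(cg, false)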

// SampleNodeAddrs uniformly samples a set of addresses from the network peer
// bootstrapper source. The numAddrs field passed in denotes how many valid
// peer addresses to return.
//
// NOTE: Part of the NetworkPeerBootstrapper interface.
func (c *ChannelGraphBootstrapper) SampleNodeAddrs(_ context.Context,
        numAddrs uint32,
        ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, error) {

        ctx := context.TODO()

        // We'll merge the ignore map with our currently selected map in order
        // to ensure we don't return any duplicate nodes.
        for n := range ignore {
                log.Tracef("Ignored node %x for bootstrapping", n)
                c.tried[n] = struct{}{}
        }

        // In order to bootstrap, we'll iterate all the nodes in the channel
        // graph, accumulating nodes until either we go through all active
        // nodes, or we reach our limit. We ensure that we meet the random
        // sampling constraint as we maintain an xor accumulator to ensure we
        // randomly sample nodes independent of the iteration order of the
        // channel graph.
        sampleAddrs := func() ([]*lnwire.NetAddress, error) {
                var (
                        a []*lnwire.NetAddress

                        // We'll create a special error so we can return early
                        // and abort the transaction once we find a match.
                        errFound = fmt.Errorf("found node")
                )

                err := c.chanGraph.ForEachNode(ctx, func(_ context.Context,
                        node autopilot.Node) error {

                        nID := autopilot.NodeID(node.PubKey())
                        if _, ok := c.tried[nID]; ok {
                                return nil
                        }

                        // We'll select the first node we come across that the
                        // hash accumulator doesn't tell us to skip. When
                        // comparing, the accumulator skips the first byte of
                        // the public key as it's 50/50 between 02 and 03. If
                        // the node is filtered out, we'll continue forward.
                        nodePubKeyBytes := node.PubKey()
                        if c.hashAccumulator.skipNode(nodePubKeyBytes) {
                                return nil
                        }

                        for _, nodeAddr := range node.Addrs() {
                                // If we haven't yet reached our limit, then
                                // we'll copy over the details of this node
                                // into the set of addresses to be returned.
                                switch nodeAddr.(type) {
                                case *net.TCPAddr, *tor.OnionAddr:
                                default:
                                        // If this isn't a valid address
                                        // supported by the protocol, then we'll
                                        // skip this node.
                                        return nil
                                }

                                nodePub, err := btcec.ParsePubKey(
                                        nodePubKeyBytes[:],
                                )
                                if err != nil {
                                        return err
                                }

                                // At this point, we've found an eligible node,
                                // so we'll return early with our shibboleth
                                // error.
                                a = append(a, &lnwire.NetAddress{
                                        IdentityKey: nodePub,
                                        Address:     nodeAddr,
                                })
                        }

                        c.tried[nID] = struct{}{}

                        return errFound
                })
                if err != nil && !errors.Is(err, errFound) {
                        return nil, err
                }

                return a, nil
        }

        // We'll loop and sample new addresses from the graph source until
        // we've reached our target number of outbound connections or we hit
        // 30 attempts, whichever comes first.
        var (
                addrs []*lnwire.NetAddress
                tries uint32
        )
        for tries < 30 && uint32(len(addrs)) < numAddrs {
                sampleAddrs, err := sampleAddrs()
                if err != nil {
                        return nil, err
                }

                tries++

                // We'll now rotate our hash accumulator one value forwards.
                c.hashAccumulator.rotate()

                // If this attempt didn't yield any addresses, then we'll exit
                // early.
                if len(sampleAddrs) == 0 {
                        continue
                }

                addrs = append(addrs, sampleAddrs...)
        }

        log.Tracef("Ending hash accumulator state: %x", c.hashAccumulator)

        return addrs, nil
}

// Name returns a human readable string which names the concrete implementation
// of the NetworkPeerBootstrapper.
//
// NOTE: Part of the NetworkPeerBootstrapper interface.
func (c *ChannelGraphBootstrapper) Name() string {
        return "Authenticated Channel Graph"
}

// DNSSeedBootstrapper is an implementation of the NetworkPeerBootstrapper
// interface which implements peer bootstrapping via a special DNS seed as
// defined in BOLT-0010. For further details concerning Lightning's current DNS
// bootstrapping protocol, see this link:
//   - https://github.com/lightningnetwork/lightning-rfc/blob/master/10-dns-bootstrap.md
type DNSSeedBootstrapper struct {
        // dnsSeeds is an array of two tuples we'll use for bootstrapping. The
        // first item in the tuple is the primary host we'll use to attempt the
        // SRV lookup we require. If we're unable to receive a response over
        // UDP, then we'll fall back to manual TCP resolution. The second item
        // in the tuple is a special A record that we'll query in order to
        // receive the IP address of the current authoritative DNS server for
        // the network seed.
        dnsSeeds [][2]string
        net      tor.Net

        // timeout is the maximum amount of time a dial will wait for a
        // connect to complete.
        timeout time.Duration
}

// A compile time assertion to ensure that DNSSeedBootstrapper meets the
// NetworkPeerBootstrapper interface.
var _ NetworkPeerBootstrapper = (*DNSSeedBootstrapper)(nil)

// NewDNSSeedBootstrapper returns a new instance of the DNSSeedBootstrapper.
// The set of passed seeds should point to DNS servers that properly implement
// Lightning's DNS peer bootstrapping protocol as defined in BOLT-0010. The set
// of passed DNS seeds should come in pairs, with the second host name to be
// used as a fallback for manual TCP resolution in the case of an error
// receiving the UDP response. The second host should return a single A record
// with the IP address of the authoritative name server.
func NewDNSSeedBootstrapper(
        seeds [][2]string, net tor.Net,
        timeout time.Duration) NetworkPeerBootstrapper {

        return &DNSSeedBootstrapper{dnsSeeds: seeds, net: net, timeout: timeout}
}
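
// To illustrate the expected shape of the seeds argument: each entry pairs a
// primary SRV host with an optional SOA shim used for the TCP fallback. The
// host names below reflect commonly used mainnet seeds (an assumption about
// lnd's defaults, not taken from this file):
//
//	seeds := [][2]string{
//		{"nodes.lightning.directory", "soa.nodes.lightning.directory"},
//		{"lseed.bitcoinstats.com", ""},
//	}
//	dnsSrc := NewDNSSeedBootstrapper(seeds, netImpl, 10*time.Second)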
×
339

340
// fallBackSRVLookup attempts to manually query for SRV records we need to
341
// properly bootstrap. We do this by querying the special record at the "soa."
342
// sub-domain of supporting DNS servers. The returned IP address will be the IP
343
// address of the authoritative DNS server. Once we have this IP address, we'll
344
// connect manually over TCP to request the SRV record. This is necessary as
345
// the records we return are currently too large for a class of resolvers,
346
// causing them to be filtered out. The targetEndPoint is the original end
347
// point that was meant to be hit.
348
func (d *DNSSeedBootstrapper) fallBackSRVLookup(soaShim string,
349
        targetEndPoint string) ([]*net.SRV, error) {
×
350

×
351
        log.Tracef("Attempting to query fallback DNS seed")
×
352

×
353
        // First, we'll lookup the IP address of the server that will act as
×
354
        // our shim.
×
355
        addrs, err := d.net.LookupHost(soaShim)
×
356
        if err != nil {
×
357
                return nil, err
×
358
        }
×
359

360
        // Once we have the IP address, we'll establish a TCP connection using
361
        // port 53.
362
        dnsServer := net.JoinHostPort(addrs[0], "53")
×
363
        conn, err := d.net.Dial("tcp", dnsServer, d.timeout)
×
364
        if err != nil {
×
365
                return nil, err
×
366
        }
×
367

368
        dnsHost := fmt.Sprintf("_nodes._tcp.%v.", targetEndPoint)
×
369
        dnsConn := &dns.Conn{Conn: conn}
×
370
        defer dnsConn.Close()
×
371

×
372
        // With the connection established, we'll craft our SRV query, write
×
373
        // toe request, then wait for the server to give our response.
×
374
        msg := new(dns.Msg)
×
375
        msg.SetQuestion(dnsHost, dns.TypeSRV)
×
376
        if err := dnsConn.WriteMsg(msg); err != nil {
×
377
                return nil, err
×
378
        }
×
379
        resp, err := dnsConn.ReadMsg()
×
380
        if err != nil {
×
381
                return nil, err
×
382
        }
×
383

384
        // If the message response code was not the success code, fail.
385
        if resp.Rcode != dns.RcodeSuccess {
×
386
                return nil, fmt.Errorf("unsuccessful SRV request, "+
×
387
                        "received: %v", resp.Rcode)
×
388
        }
×
389

390
        // Retrieve the RR(s) of the Answer section, and convert to the format
391
        // that net.LookupSRV would normally return.
392
        var rrs []*net.SRV
×
393
        for _, rr := range resp.Answer {
×
394
                srv := rr.(*dns.SRV)
×
395
                rrs = append(rrs, &net.SRV{
×
396
                        Target:   srv.Target,
×
397
                        Port:     srv.Port,
×
398
                        Priority: srv.Priority,
×
399
                        Weight:   srv.Weight,
×
400
                })
×
401
        }
×
402

403
        return rrs, nil
×
404
}

// SampleNodeAddrs uniformly samples a set of addresses from the network peer
// bootstrapper source. The numAddrs field passed in denotes how many valid
// peer addresses to return. The set of DNS seeds are used successively to
// retrieve eligible target nodes.
func (d *DNSSeedBootstrapper) SampleNodeAddrs(_ context.Context,
        numAddrs uint32,
        ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, error) {

        var netAddrs []*lnwire.NetAddress

        // We'll try all the registered DNS seeds, exiting early if one of them
        // gives us all the peers we need.
        //
        // TODO(roasbeef): should combine results from both
search:
        for _, dnsSeedTuple := range d.dnsSeeds {
                // We'll first query the seed with an SRV record so we can
                // obtain a random sample of the encoded public keys of nodes.
                // We use the lndLookupSRV function for this task.
                primarySeed := dnsSeedTuple[0]
                _, addrs, err := d.net.LookupSRV(
                        "nodes", "tcp", primarySeed, d.timeout,
                )
                if err != nil {
                        log.Tracef("Unable to lookup SRV records via "+
                                "primary seed (%v): %v", primarySeed, err)

                        log.Trace("Falling back to secondary")

                        // If the host of the secondary seed is blank, then
                        // we'll bail here as we can't proceed.
                        if dnsSeedTuple[1] == "" {
                                log.Tracef("DNS seed %v has no secondary, "+
                                        "skipping fallback", primarySeed)
                                continue
                        }

                        // If we get an error when trying to query via the
                        // primary seed, we'll fall back to the secondary seed
                        // before concluding failure.
                        soaShim := dnsSeedTuple[1]
                        addrs, err = d.fallBackSRVLookup(
                                soaShim, primarySeed,
                        )
                        if err != nil {
                                log.Tracef("Unable to query fall "+
                                        "back dns seed (%v): %v", soaShim, err)
                                continue
                        }

                        log.Tracef("Successfully queried fallback DNS seed")
                }

                log.Tracef("Retrieved SRV records from dns seed: %v",
                        lnutils.SpewLogClosure(addrs))

                // Next, we'll need to issue an A record request for each of
                // the nodes, skipping it if nothing comes back.
                for _, nodeSrv := range addrs {
                        if uint32(len(netAddrs)) >= numAddrs {
                                break search
                        }

                        // With the SRV target obtained, we'll now perform
                        // another query to obtain the IP address for the
                        // matching bech32 encoded node key. We use the
                        // lndLookup function for this task.
                        bechNodeHost := nodeSrv.Target
                        addrs, err := d.net.LookupHost(bechNodeHost)
                        if err != nil {
                                return nil, err
                        }

                        if len(addrs) == 0 {
                                log.Tracef("No addresses for %v, skipping",
                                        bechNodeHost)
                                continue
                        }

                        log.Tracef("Attempting to convert: %v", bechNodeHost)

                        // If the host isn't correctly formatted, then we'll
                        // skip it.
                        if len(bechNodeHost) == 0 ||
                                !strings.Contains(bechNodeHost, ".") {

                                continue
                        }

                        // If we have a set of valid addresses, then we'll need
                        // to parse the public key from the original bech32
                        // encoded string.
                        bechNode := strings.Split(bechNodeHost, ".")
                        _, nodeBytes5Bits, err := bech32.Decode(bechNode[0])
                        if err != nil {
                                return nil, err
                        }

                        // Once we have the bech32 decoded pubkey, we'll need
                        // to convert the 5-bit word grouping into our regular
                        // 8-bit word grouping so we can convert it into a
                        // public key.
                        nodeBytes, err := bech32.ConvertBits(
                                nodeBytes5Bits, 5, 8, false,
                        )
                        if err != nil {
                                return nil, err
                        }
                        nodeKey, err := btcec.ParsePubKey(nodeBytes)
                        if err != nil {
                                return nil, err
                        }

                        // If we have an ignore list, and this node is in the
                        // ignore list, then we'll go to the next candidate.
                        if ignore != nil {
                                nID := autopilot.NewNodeID(nodeKey)
                                if _, ok := ignore[nID]; ok {
                                        continue
                                }
                        }

                        // Finally we'll convert the host:port peer to a proper
                        // TCP address to use within the lnwire.NetAddress. We
                        // don't need to use the lndResolveTCP function here
                        // because we already have the host:port peer.
                        addr := net.JoinHostPort(
                                addrs[0],
                                strconv.FormatUint(uint64(nodeSrv.Port), 10),
                        )
                        tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
                        if err != nil {
                                return nil, err
                        }

                        // Finally, with all the information parsed, we'll
                        // return this fully valid address as a connection
                        // attempt.
                        lnAddr := &lnwire.NetAddress{
                                IdentityKey: nodeKey,
                                Address:     tcpAddr,
                        }

                        log.Tracef("Obtained %v as valid reachable "+
                                "node", lnAddr)

                        netAddrs = append(netAddrs, lnAddr)
                }
        }

        return netAddrs, nil
}
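
// The node-key handling above can be exercised in isolation. A minimal sketch
// of turning one SRV target back into a public key (the target format is
// "<bech32-encoded node key>.<seed domain>."):
//
//	bech := strings.Split(nodeSrv.Target, ".")[0]
//
//	_, data5, err := bech32.Decode(bech)
//	if err != nil {
//		return nil, err
//	}
//
//	// Regroup the 5-bit bech32 words into 8-bit bytes, then parse the
//	// 33-byte compressed public key.
//	data8, err := bech32.ConvertBits(data5, 5, 8, false)
//	if err != nil {
//		return nil, err
//	}
//	nodeKey, err := btcec.ParsePubKey(data8)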

// Name returns a human readable string which names the concrete
// implementation of the NetworkPeerBootstrapper.
func (d *DNSSeedBootstrapper) Name() string {
        return fmt.Sprintf("BOLT-0010 DNS Seed: %v", d.dnsSeeds)
}

// hashAccumulator is an interface that defines the methods required for
// a hash accumulator used to sample nodes from the channel graph.
type hashAccumulator interface {
        // rotate rotates the hash accumulator value.
        rotate()

        // skipNode returns true if the node with the given public key
        // should be skipped based on the current hash accumulator state.
        skipNode(pubKey route.Vertex) bool
}

// randomHashAccumulator is an implementation of the hashAccumulator
// interface that uses a random hash to sample nodes from the channel graph.
type randomHashAccumulator struct {
        hash [32]byte
}

// A compile time assertion to ensure that randomHashAccumulator meets the
// hashAccumulator interface.
var _ hashAccumulator = (*randomHashAccumulator)(nil)

// newRandomHashAccumulator returns a new instance of a randomHashAccumulator.
// This accumulator is used to randomly sample nodes from the channel graph.
func newRandomHashAccumulator() (*randomHashAccumulator, error) {
        var r randomHashAccumulator

        if _, err := rand.Read(r.hash[:]); err != nil {
                return nil, fmt.Errorf("unable to read random bytes: %w", err)
        }

        return &r, nil
}

// rotate rotates the hash accumulator by hashing the current value
// with itself. This ensures that we have a new random value to compare
// against when we sample nodes from the channel graph.
//
// NOTE: this is part of the hashAccumulator interface.
func (r *randomHashAccumulator) rotate() {
        r.hash = sha256.Sum256(r.hash[:])
}

// skipNode returns true if the node with the given public key should be
// skipped based on the current hash accumulator state. A node is skipped when
// its public key is lexicographically less than the current accumulator
// value. When comparing, we skip the first byte of the public key as it's
// 50/50 between 02 and 03 for compressed pub keys.
//
// NOTE: this is part of the hashAccumulator interface.
func (r *randomHashAccumulator) skipNode(pub route.Vertex) bool {
        return bytes.Compare(r.hash[:], pub[1:]) > 0
}
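
// To make the filtering rule above concrete: with an accumulator value of
// 0x80... roughly half of all node keys (those whose bytes after the 02/03
// prefix compare below the accumulator) are skipped on a given pass, and
// rotate() re-rolls that threshold for the next pass. A minimal sketch with
// purely illustrative values:
//
//	var acc randomHashAccumulator
//	acc.hash[0] = 0x80
//
//	var lowKey, highKey route.Vertex
//	lowKey[1] = 0x10  // 0x10... < 0x80..., so this node is skipped.
//	highKey[1] = 0xf0 // 0xf0... > 0x80..., so this node is kept.
//
//	_ = acc.skipNode(lowKey)  // true
//	_ = acc.skipNode(highKey) // false
//
//	acc.rotate() // new threshold = sha256(old threshold)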
×
618

619
// noOpHashAccumulator is a no-op implementation of the hashAccumulator
620
// interface. This is used when we want deterministic behavior and don't
621
// want to sample nodes randomly from the channel graph.
622
type noOpHashAccumulator struct{}
623

624
// newNoOpHashAccumulator returns a new instance of a noOpHashAccumulator.
UNCOV
625
func newNoOpHashAccumulator() *noOpHashAccumulator {
×
UNCOV
626
        return &noOpHashAccumulator{}
×
UNCOV
627
}
×
628

629
// rotate is a no-op for the noOpHashAccumulator.
630
//
631
// NOTE: this is part of the hashAccumulator interface.
UNCOV
632
func (*noOpHashAccumulator) rotate() {}
×
633

634
// skipNode always returns false, meaning that no nodes will be skipped.
635
//
636
// NOTE: this is part of the hashAccumulator interface.
UNCOV
637
func (*noOpHashAccumulator) skipNode(route.Vertex) bool {
×
UNCOV
638
        return false
×
UNCOV
639
}
×
640

641
// A compile-time assertion to ensure that noOpHashAccumulator meets the
642
// hashAccumulator interface.
643
var _ hashAccumulator = (*noOpHashAccumulator)(nil)