• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 13035292482

29 Jan 2025 03:59PM UTC coverage: 49.3% (-9.5%) from 58.777%
13035292482

Pull #9456

github

mohamedawnallah
docs: update release-notes-0.19.0.md

In this commit, we warn users about the removal
of RPCs `SendToRoute`, `SendToRouteSync`, `SendPayment`,
and `SendPaymentSync` in the next release 0.20.
Pull Request #9456: lnrpc+docs: deprecate warning `SendToRoute`, `SendToRouteSync`, `SendPayment`, and `SendPaymentSync` in Release 0.19

100634 of 204126 relevant lines covered (49.3%)

1.54 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/channeldb/migration30/migration.go
1
package migration30
2

3
import (
4
        "bytes"
5
        "encoding/binary"
6
        "fmt"
7
        "math"
8
        "sync"
9

10
        mig24 "github.com/lightningnetwork/lnd/channeldb/migration24"
11
        mig26 "github.com/lightningnetwork/lnd/channeldb/migration26"
12
        mig "github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
13
        "github.com/lightningnetwork/lnd/kvdb"
14
)
15

16
// recordsPerTx specifies the number of records to be migrated in each
// database transaction. In the worst case, each old revocation log is 28,057
// bytes. 20,000 records would consume 0.56 GB of ram, which is feasible for a
// modern machine.
//
// NOTE: we could've used more ram but it doesn't help with the speed of the
// migration since most of the CPU time is used for calculating the output
// indexes.
const recordsPerTx = 20_000
25

26
// MigrateRevLogConfig is an interface that defines the config that should be
// passed to the MigrateRevocationLog function.
type MigrateRevLogConfig interface {
	// GetNoAmountData returns true if the amount data of revoked
	// commitment transactions should not be stored in the revocation log.
	GetNoAmountData() bool
}
33

34
// MigrateRevLogConfigImpl implements the MigrateRevLogConfig interface.
type MigrateRevLogConfigImpl struct {
	// NoAmountData if set to true will result in the amount data of
	// revoked commitment transactions not being stored in the revocation
	// log.
	NoAmountData bool
}
40

41
// GetNoAmountData returns true if the amount data of revoked commitment
// transactions should not be stored in the revocation log.
func (c *MigrateRevLogConfigImpl) GetNoAmountData() bool {
	return c.NoAmountData
}
×
46

47
// MigrateRevocationLog migrates the old revocation logs into the newer format
// and deletes them once finished, with the deletion only happening once ALL
// the old logs have been migrated. The migration is batched: each db
// transaction converts at most recordsPerTx records, and progress is
// re-computed between batches so an interrupted migration resumes where it
// left off.
func MigrateRevocationLog(db kvdb.Backend, cfg MigrateRevLogConfig) error {
	log.Infof("Migrating revocation logs, might take a while...")

	var (
		err error

		// finished is used to exit the for loop.
		finished bool

		// total is the number of total records.
		total uint64

		// migrated is the number of already migrated records.
		migrated uint64
	)

	// First of all, read the stats of the revocation logs.
	total, migrated, err = logMigrationStat(db)
	if err != nil {
		return err
	}
	log.Infof("Total logs=%d, migrated=%d", total, migrated)

	// Exit early if the old logs have already been migrated and deleted.
	if total == 0 {
		log.Info("Migration already finished!")
		return nil
	}

	for {
		if finished {
			log.Infof("Migrating old revocation logs finished, " +
				"now checking the migration results...")
			break
		}

		// Process one batch of the migration. finished is set once
		// processMigration can no longer locate un-migrated records.
		err = kvdb.Update(db, func(tx kvdb.RwTx) error {
			finished, err = processMigration(tx, cfg)
			if err != nil {
				return err
			}
			return nil
		}, func() {})
		if err != nil {
			return err
		}

		// Each time we finished the above process, we'd read the stats
		// again to understand the current progress.
		total, migrated, err = logMigrationStat(db)
		if err != nil {
			return err
		}

		// Calculate and log the progress if the progress is less than
		// one hundred percent.
		progress := float64(migrated) / float64(total) * 100
		if progress >= 100 {
			continue
		}

		log.Infof("Migration progress: %.3f%%, still have: %d",
			progress, total-migrated)
	}

	// Before we can safely delete the old buckets, we perform a check to
	// make sure the logs are migrated as expected.
	err = kvdb.Update(db, validateMigration, func() {})
	if err != nil {
		return fmt.Errorf("validate migration failed: %w", err)
	}

	log.Info("Migration check passed, now deleting the old logs...")

	// Once the migration completes, we can now safely delete the old
	// revocation logs.
	if err := deleteOldBuckets(db); err != nil {
		return fmt.Errorf("deleteOldBuckets err: %w", err)
	}

	log.Info("Old revocation log buckets removed!")
	return nil
}
134

135
// processMigration finds the next un-migrated revocation logs, reads a max
136
// number of `recordsPerTx` records, converts them into the new revocation logs
137
// and save them to disk.
138
func processMigration(tx kvdb.RwTx, cfg MigrateRevLogConfig) (bool, error) {
×
139
        openChanBucket := tx.ReadWriteBucket(openChannelBucket)
×
140

×
141
        // If no bucket is found, we can exit early.
×
142
        if openChanBucket == nil {
×
143
                return false, fmt.Errorf("root bucket not found")
×
144
        }
×
145

146
        // Locate the next migration height.
147
        locator, err := locateNextUpdateNum(openChanBucket)
×
148
        if err != nil {
×
149
                return false, fmt.Errorf("locator got error: %w", err)
×
150
        }
×
151

152
        // If the returned locator is nil, we've done migrating the logs.
153
        if locator == nil {
×
154
                return true, nil
×
155
        }
×
156

157
        // Read a list of old revocation logs.
158
        entryMap, err := readOldRevocationLogs(openChanBucket, locator, cfg)
×
159
        if err != nil {
×
160
                return false, fmt.Errorf("read old logs err: %w", err)
×
161
        }
×
162

163
        // Migrate the revocation logs.
164
        return false, writeRevocationLogs(openChanBucket, entryMap)
×
165
}
166

167
// deleteOldBuckets iterates all the channel buckets and deletes the old
// revocation buckets. It first collects the locators of every chan bucket,
// then removes each deprecated revocation log bucket in a single db
// transaction.
func deleteOldBuckets(db kvdb.Backend) error {
	// locators records all the chan buckets found in the database.
	var locators []*updateLocator

	// reader is a helper closure that saves the locator found. Each
	// locator is relatively small(33+32+36+8=109 bytes), assuming 1 GB of
	// ram we can fit roughly 10 million records. Since each record
	// corresponds to a channel, we should have more than enough memory to
	// read them all.
	reader := func(_ kvdb.RwBucket, l *updateLocator) error { // nolint:unparam
		locators = append(locators, l)
		return nil
	}

	// remover is a helper closure that removes the old revocation log
	// bucket under the specified chan bucket by the given locator.
	remover := func(rootBucket kvdb.RwBucket, l *updateLocator) error {
		chanBucket, err := l.locateChanBucket(rootBucket)
		if err != nil {
			return err
		}

		return chanBucket.DeleteNestedBucket(
			revocationLogBucketDeprecated,
		)
	}

	// Perform the deletion in one db transaction. This should not cause
	// any memory issue as the deletion doesn't load any data from the
	// buckets.
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		openChanBucket := tx.ReadWriteBucket(openChannelBucket)

		// Exit early if there's no bucket.
		if openChanBucket == nil {
			return nil
		}

		// Iterate the buckets to find all the locators.
		err := iterateBuckets(openChanBucket, nil, reader)
		if err != nil {
			return err
		}

		// Iterate the locators and delete all the old revocation log
		// buckets.
		for _, l := range locators {
			err := remover(openChanBucket, l)

			// If the bucket doesn't exist, we can exit safely.
			if err != nil && err != kvdb.ErrBucketNotFound {
				return err
			}
		}

		return nil
	}, func() {})
}
226

227
// writeRevocationLogs unwraps the entryMap and writes the new revocation logs.
228
func writeRevocationLogs(openChanBucket kvdb.RwBucket,
229
        entryMap logEntries) error {
×
230

×
231
        for locator, logs := range entryMap {
×
232
                // Find the channel bucket.
×
233
                chanBucket, err := locator.locateChanBucket(openChanBucket)
×
234
                if err != nil {
×
235
                        return fmt.Errorf("locateChanBucket err: %w", err)
×
236
                }
×
237

238
                // Create the new log bucket.
239
                logBucket, err := chanBucket.CreateBucketIfNotExists(
×
240
                        revocationLogBucket,
×
241
                )
×
242
                if err != nil {
×
243
                        return fmt.Errorf("create log bucket err: %w", err)
×
244
                }
×
245

246
                // Write the new logs.
247
                for _, entry := range logs {
×
248
                        var b bytes.Buffer
×
249
                        err := serializeRevocationLog(&b, entry.log)
×
250
                        if err != nil {
×
251
                                return err
×
252
                        }
×
253

254
                        logEntrykey := mig24.MakeLogKey(entry.commitHeight)
×
255
                        err = logBucket.Put(logEntrykey[:], b.Bytes())
×
256
                        if err != nil {
×
257
                                return fmt.Errorf("putRevocationLog err: %w",
×
258
                                        err)
×
259
                        }
×
260
                }
261
        }
262

263
        return nil
×
264
}
265

266
// logMigrationStat reads the buckets to provide stats over current migration
267
// progress. The returned values are the numbers of total records and already
268
// migrated records.
269
func logMigrationStat(db kvdb.Backend) (uint64, uint64, error) {
×
270
        var (
×
271
                err error
×
272

×
273
                // total is the number of total records.
×
274
                total uint64
×
275

×
276
                // unmigrated is the number of unmigrated records.
×
277
                unmigrated uint64
×
278
        )
×
279

×
280
        err = kvdb.Update(db, func(tx kvdb.RwTx) error {
×
281
                total, unmigrated, err = fetchLogStats(tx)
×
282
                return err
×
283
        }, func() {})
×
284

285
        log.Debugf("Total logs=%d, unmigrated=%d", total, unmigrated)
×
286
        return total, total - unmigrated, err
×
287
}
288

289
// fetchLogStats iterates all the chan buckets to provide stats about the logs.
290
// The returned values are num of total records, and num of un-migrated
291
// records.
292
func fetchLogStats(tx kvdb.RwTx) (uint64, uint64, error) {
×
293
        var (
×
294
                total           uint64
×
295
                totalUnmigrated uint64
×
296
        )
×
297

×
298
        openChanBucket := tx.ReadWriteBucket(openChannelBucket)
×
299

×
300
        // If no bucket is found, we can exit early.
×
301
        if openChanBucket == nil {
×
302
                return 0, 0, fmt.Errorf("root bucket not found")
×
303
        }
×
304

305
        // counter is a helper closure used to count the number of records
306
        // based on the given bucket.
307
        counter := func(chanBucket kvdb.RwBucket, bucket []byte) uint64 {
×
308
                // Read the sub-bucket level 4.
×
309
                logBucket := chanBucket.NestedReadBucket(bucket)
×
310

×
311
                // Exit early if we don't have the bucket.
×
312
                if logBucket == nil {
×
313
                        return 0
×
314
                }
×
315

316
                // Jump to the end of the cursor.
317
                key, _ := logBucket.ReadCursor().Last()
×
318

×
319
                // Since the CommitHeight is a zero-based monotonically
×
320
                // increased index, its value plus one reflects the total
×
321
                // records under this chan bucket.
×
322
                lastHeight := binary.BigEndian.Uint64(key) + 1
×
323

×
324
                return lastHeight
×
325
        }
326

327
        // countTotal is a callback function used to count the total number of
328
        // records.
329
        countTotal := func(chanBucket kvdb.RwBucket, l *updateLocator) error {
×
330
                total += counter(chanBucket, revocationLogBucketDeprecated)
×
331
                return nil
×
332
        }
×
333

334
        // countUnmigrated is a callback function used to count the total
335
        // number of un-migrated records.
336
        countUnmigrated := func(chanBucket kvdb.RwBucket,
×
337
                l *updateLocator) error {
×
338

×
339
                totalUnmigrated += counter(
×
340
                        chanBucket, revocationLogBucketDeprecated,
×
341
                )
×
342
                return nil
×
343
        }
×
344

345
        // Locate the next migration height.
346
        locator, err := locateNextUpdateNum(openChanBucket)
×
347
        if err != nil {
×
348
                return 0, 0, fmt.Errorf("locator got error: %w", err)
×
349
        }
×
350

351
        // If the returned locator is not nil, we still have un-migrated
352
        // records so we need to count them. Otherwise we've done migrating the
353
        // logs.
354
        if locator != nil {
×
355
                err = iterateBuckets(openChanBucket, locator, countUnmigrated)
×
356
                if err != nil {
×
357
                        return 0, 0, err
×
358
                }
×
359
        }
360

361
        // Count the total number of records by supplying a nil locator.
362
        err = iterateBuckets(openChanBucket, nil, countTotal)
×
363
        if err != nil {
×
364
                return 0, 0, err
×
365
        }
×
366

367
        return total, totalUnmigrated, err
×
368
}
369

370
// logEntry houses the info needed to write a new revocation log.
type logEntry struct {
	// log is the converted revocation log to be persisted.
	log *RevocationLog

	// commitHeight is the CommitHeight of the revoked commitment, used
	// as the key when storing the log.
	commitHeight uint64

	// ourIndex is the index of our output in the commitment tx.
	ourIndex uint32

	// theirIndex is the index of their output in the commitment tx.
	theirIndex uint32

	// locator identifies the chan bucket this entry belongs to.
	locator *updateLocator
}
378

379
// logEntries maps a bucket locator to a list of entries under that bucket.
//
// NOTE: the map is keyed by pointer, so entries that should be grouped
// together must share the exact same *updateLocator value.
type logEntries map[*updateLocator][]*logEntry
381

382
// result is made of two channels that's used to send back the constructed new
// revocation log or an error.
type result struct {
	// newLog receives the constructed entry on success.
	newLog chan *logEntry

	// errChan receives an error if the construction failed.
	errChan chan error
}
388

389
// readOldRevocationLogs finds a list of old revocation logs and converts them
390
// into the new revocation logs.
391
func readOldRevocationLogs(openChanBucket kvdb.RwBucket,
392
        locator *updateLocator, cfg MigrateRevLogConfig) (logEntries, error) {
×
393

×
394
        entries := make(logEntries)
×
395
        results := make([]*result, 0)
×
396

×
397
        var wg sync.WaitGroup
×
398

×
399
        // collectLogs is a helper closure that reads all newly created
×
400
        // revocation logs sent over the result channels.
×
401
        //
×
402
        // NOTE: the order of the logs cannot be guaranteed, which is fine as
×
403
        // boltdb will take care of the orders when saving them.
×
404
        collectLogs := func() error {
×
405
                wg.Wait()
×
406

×
407
                for _, r := range results {
×
408
                        select {
×
409
                        case entry := <-r.newLog:
×
410
                                entries[entry.locator] = append(
×
411
                                        entries[entry.locator], entry,
×
412
                                )
×
413

414
                        case err := <-r.errChan:
×
415
                                return err
×
416
                        }
417
                }
418

419
                return nil
×
420
        }
421

422
        // createLog is a helper closure that constructs a new revocation log.
423
        //
424
        // NOTE: used as a goroutine.
425
        createLog := func(chanState *mig26.OpenChannel,
×
426
                c mig.ChannelCommitment, l *updateLocator, r *result) {
×
427

×
428
                defer wg.Done()
×
429

×
430
                // Find the output indexes.
×
431
                ourIndex, theirIndex, err := findOutputIndexes(chanState, &c)
×
432
                if err != nil {
×
433
                        r.errChan <- err
×
434
                }
×
435

436
                // Convert the old logs into the new logs. We do this early in
437
                // the read tx so the old large revocation log can be set to
438
                // nil here so save us some memory space.
439
                newLog, err := convertRevocationLog(
×
440
                        &c, ourIndex, theirIndex, cfg.GetNoAmountData(),
×
441
                )
×
442
                if err != nil {
×
443
                        r.errChan <- err
×
444
                }
×
445
                // Create the entry that will be used to create the new log.
446
                entry := &logEntry{
×
447
                        log:          newLog,
×
448
                        commitHeight: c.CommitHeight,
×
449
                        ourIndex:     ourIndex,
×
450
                        theirIndex:   theirIndex,
×
451
                        locator:      l,
×
452
                }
×
453

×
454
                r.newLog <- entry
×
455
        }
456

457
        // innerCb is the stepping function used when iterating the old log
458
        // bucket.
459
        innerCb := func(chanState *mig26.OpenChannel, l *updateLocator,
×
460
                _, v []byte) error {
×
461

×
462
                reader := bytes.NewReader(v)
×
463
                c, err := mig.DeserializeChanCommit(reader)
×
464
                if err != nil {
×
465
                        return err
×
466
                }
×
467

468
                r := &result{
×
469
                        newLog:  make(chan *logEntry, 1),
×
470
                        errChan: make(chan error, 1),
×
471
                }
×
472
                results = append(results, r)
×
473

×
474
                // We perform the log creation in a goroutine as it takes some
×
475
                // time to compute and find output indexes.
×
476
                wg.Add(1)
×
477
                go createLog(chanState, c, l, r)
×
478

×
479
                // Check the records read so far and signals exit when we've
×
480
                // reached our memory cap.
×
481
                if len(results) >= recordsPerTx {
×
482
                        return errExit
×
483
                }
×
484

485
                return nil
×
486
        }
487

488
        // cb is the callback function to be used when iterating the buckets.
489
        cb := func(chanBucket kvdb.RwBucket, l *updateLocator) error {
×
490
                // Read the open channel.
×
491
                c := &mig26.OpenChannel{}
×
492
                err := mig26.FetchChanInfo(chanBucket, c, false)
×
493
                if err != nil {
×
494
                        return fmt.Errorf("unable to fetch chan info: %w", err)
×
495
                }
×
496

497
                err = fetchChanRevocationState(chanBucket, c)
×
498
                if err != nil {
×
499
                        return fmt.Errorf("unable to fetch revocation "+
×
500
                                "state: %v", err)
×
501
                }
×
502

503
                // Read the sub-bucket level 4.
504
                logBucket := chanBucket.NestedReadBucket(
×
505
                        revocationLogBucketDeprecated,
×
506
                )
×
507
                // Exit early if we don't have the old bucket.
×
508
                if logBucket == nil {
×
509
                        return nil
×
510
                }
×
511

512
                // Init the map key when needed.
513
                _, ok := entries[l]
×
514
                if !ok {
×
515
                        entries[l] = make([]*logEntry, 0, recordsPerTx)
×
516
                }
×
517

518
                return iterator(
×
519
                        logBucket, locator.nextHeight,
×
520
                        func(k, v []byte) error {
×
521
                                // Reset the nextHeight for following chan
×
522
                                // buckets.
×
523
                                locator.nextHeight = nil
×
524
                                return innerCb(c, l, k, v)
×
525
                        },
×
526
                )
527
        }
528

529
        err := iterateBuckets(openChanBucket, locator, cb)
×
530
        // If there's an error and it's not exit signal, we won't collect the
×
531
        // logs from the result channels.
×
532
        if err != nil && err != errExit {
×
533
                return nil, err
×
534
        }
×
535

536
        // Otherwise, collect the logs.
537
        err = collectLogs()
×
538

×
539
        return entries, err
×
540
}
541

542
// convertRevocationLog uses the fields `CommitTx` and `Htlcs` from a
543
// ChannelCommitment to construct a revocation log entry.
544
func convertRevocationLog(commit *mig.ChannelCommitment,
545
        ourOutputIndex, theirOutputIndex uint32,
546
        noAmtData bool) (*RevocationLog, error) {
×
547

×
548
        // Sanity check that the output indexes can be safely converted.
×
549
        if ourOutputIndex > math.MaxUint16 {
×
550
                return nil, ErrOutputIndexTooBig
×
551
        }
×
552
        if theirOutputIndex > math.MaxUint16 {
×
553
                return nil, ErrOutputIndexTooBig
×
554
        }
×
555

556
        rl := &RevocationLog{
×
557
                OurOutputIndex:   uint16(ourOutputIndex),
×
558
                TheirOutputIndex: uint16(theirOutputIndex),
×
559
                CommitTxHash:     commit.CommitTx.TxHash(),
×
560
                HTLCEntries:      make([]*HTLCEntry, 0, len(commit.Htlcs)),
×
561
        }
×
562

×
563
        if !noAmtData {
×
564
                rl.TheirBalance = &commit.RemoteBalance
×
565
                rl.OurBalance = &commit.LocalBalance
×
566
        }
×
567

568
        for _, htlc := range commit.Htlcs {
×
569
                // Skip dust HTLCs.
×
570
                if htlc.OutputIndex < 0 {
×
571
                        continue
×
572
                }
573

574
                // Sanity check that the output indexes can be safely
575
                // converted.
576
                if htlc.OutputIndex > math.MaxUint16 {
×
577
                        return nil, ErrOutputIndexTooBig
×
578
                }
×
579

580
                entry := &HTLCEntry{
×
581
                        RHash:         htlc.RHash,
×
582
                        RefundTimeout: htlc.RefundTimeout,
×
583
                        Incoming:      htlc.Incoming,
×
584
                        OutputIndex:   uint16(htlc.OutputIndex),
×
585
                        Amt:           htlc.Amt.ToSatoshis(),
×
586
                }
×
587
                rl.HTLCEntries = append(rl.HTLCEntries, entry)
×
588
        }
589

590
        return rl, nil
×
591
}
592

593
// validateMigration checks that the data saved in the new buckets match those
594
// saved in the old buckets. It does so by checking the last keys saved in both
595
// buckets can match, given the assumption that the `CommitHeight` is
596
// monotonically increased value so the last key represents the total number of
597
// records saved.
598
func validateMigration(tx kvdb.RwTx) error {
×
599
        openChanBucket := tx.ReadWriteBucket(openChannelBucket)
×
600

×
601
        // If no bucket is found, we can exit early.
×
602
        if openChanBucket == nil {
×
603
                return nil
×
604
        }
×
605

606
        // exitWithErr is a helper closure that prepends an error message with
607
        // the locator info.
608
        exitWithErr := func(l *updateLocator, msg string) error {
×
609
                return fmt.Errorf("unmatched records found under <nodePub=%x"+
×
610
                        ", chainHash=%x, fundingOutpoint=%x>: %v", l.nodePub,
×
611
                        l.chainHash, l.fundingOutpoint, msg)
×
612
        }
×
613

614
        // cb is the callback function to be used when iterating the buckets.
615
        cb := func(chanBucket kvdb.RwBucket, l *updateLocator) error {
×
616
                // Read both the old and new revocation log buckets.
×
617
                oldBucket := chanBucket.NestedReadBucket(
×
618
                        revocationLogBucketDeprecated,
×
619
                )
×
620
                newBucket := chanBucket.NestedReadBucket(revocationLogBucket)
×
621

×
622
                // Exit early if the old bucket is nil.
×
623
                //
×
624
                // NOTE: the new bucket may not be nil here as new logs might
×
625
                // have been created using lnd@v0.15.0.
×
626
                if oldBucket == nil {
×
627
                        return nil
×
628
                }
×
629

630
                // Return an error if the expected new bucket cannot be found.
631
                if newBucket == nil {
×
632
                        return exitWithErr(l, "expected new bucket")
×
633
                }
×
634

635
                // Acquire the cursors.
636
                oldCursor := oldBucket.ReadCursor()
×
637
                newCursor := newBucket.ReadCursor()
×
638

×
639
                // Jump to the end of the cursors to do a quick check.
×
640
                newKey, _ := oldCursor.Last()
×
641
                oldKey, _ := newCursor.Last()
×
642

×
643
                // We expected the CommitHeights to be matched for nodes prior
×
644
                // to v0.15.0.
×
645
                if bytes.Equal(newKey, oldKey) {
×
646
                        return nil
×
647
                }
×
648

649
                // If the keys do not match, it's likely the node is running
650
                // v0.15.0 and have new logs created. In this case, we will
651
                // validate that every record in the old bucket can be found in
652
                // the new bucket.
653
                oldKey, _ = oldCursor.First()
×
654

×
655
                for {
×
656
                        // Try to locate the old key in the new bucket and we
×
657
                        // expect it to be found.
×
658
                        newKey, _ := newCursor.Seek(oldKey)
×
659

×
660
                        // If the old key is not found in the new bucket,
×
661
                        // return an error.
×
662
                        //
×
663
                        // NOTE: because Seek will return the next key when the
×
664
                        // passed key cannot be found, we need to compare the
×
665
                        // keys to deicde whether the old key is found or not.
×
666
                        if !bytes.Equal(newKey, oldKey) {
×
667
                                errMsg := fmt.Sprintf("old bucket has "+
×
668
                                        "CommitHeight=%v cannot be found in "+
×
669
                                        "new bucket", oldKey)
×
670
                                return exitWithErr(l, errMsg)
×
671
                        }
×
672

673
                        // Otherwise, keep iterating the old bucket.
674
                        oldKey, _ = oldCursor.Next()
×
675

×
676
                        // If we've done iterating, all keys have been matched
×
677
                        // and we can safely exit.
×
678
                        if oldKey == nil {
×
679
                                return nil
×
680
                        }
×
681
                }
682
        }
683

684
        return iterateBuckets(openChanBucket, nil, cb)
×
685
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc