
lightningnetwork / lnd — build 16073799408

04 Jul 2025 12:28PM UTC coverage: 67.44%. First build.

Pull Request #10037: [graph mig 2]: graph/db: migrate graph channels and policies from kvdb to SQL
Merge 47376426c into ab4a75e5b (committed via GitHub web-flow)

10 of 222 new or added lines in 2 files covered. (4.5%)
135152 of 200404 relevant lines covered (67.44%)
21828.25 hits per line
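The percentages follow directly from the line counts: 135152 / 200404 ≈ 0.6744, i.e. 67.44% overall, and 10 / 222 ≈ 0.045, i.e. 4.5% of the lines added or changed by this pull request.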

Source File

/graph/db/sql_migration.go — 0.0% of relevant lines covered
package graphdb

import (
        "context"
        "errors"
        "fmt"
        "reflect"
        "sort"
        "time"

        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/davecgh/go-spew/spew"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/kvdb"
        "github.com/lightningnetwork/lnd/sqldb/sqlc"
        "github.com/pmezard/go-difflib/difflib"
)

// ErrMigrationMismatch is returned when a migrated graph record does not match
// the original record.
var ErrMigrationMismatch = fmt.Errorf("migrated graph record does not match " +
        "original record")

// MigrateGraphToSQL migrates the graph store from a KV backend to a SQL
// backend.
//
// NOTE: this is currently not called from any code path. It is called via tests
// only for now and will be called from the main lnd binary once the
// migration is fully implemented and tested.
func MigrateGraphToSQL(ctx context.Context, kvBackend kvdb.Backend,
        sqlDB SQLQueries, chain chainhash.Hash) error {

        log.Infof("Starting migration of the graph store from KV to SQL")
        t0 := time.Now()

        // Check if there is a graph to migrate.
        graphExists, err := checkGraphExists(kvBackend)
        if err != nil {
                return fmt.Errorf("failed to check graph existence: %w", err)
        }
        if !graphExists {
                log.Infof("No graph found in KV store, skipping the migration")
                return nil
        }

        // 1) Migrate all the nodes.
        if err := migrateNodes(ctx, kvBackend, sqlDB); err != nil {
                return fmt.Errorf("could not migrate nodes: %w", err)
        }

        // 2) Migrate the source node.
        if err := migrateSourceNode(ctx, kvBackend, sqlDB); err != nil {
                return fmt.Errorf("could not migrate source node: %w", err)
        }

        // 3) Migrate all the channels and channel policies.
        err = migrateChannelsAndPolicies(ctx, kvBackend, sqlDB, chain)
        if err != nil {
                return fmt.Errorf("could not migrate channels and policies: %w",
                        err)
        }

        log.Infof("Finished migration of the graph store from KV to SQL in %v",
                time.Since(t0))

        return nil
}
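// Editor's illustration (not part of the original file): a minimal sketch of
// how the migration might be invoked once it is wired into lnd, e.g. from a
// test harness. The kvStore and queries values are hypothetical and assumed
// to have been constructed elsewhere; the chain hash is taken from the
// genesis block of the network lnd is running on.
//
//        ctx := context.Background()
//        chain := *chaincfg.MainNetParams.GenesisHash
//        if err := MigrateGraphToSQL(ctx, kvStore, queries, chain); err != nil {
//                log.Errorf("graph migration failed: %v", err)
//        }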

// checkGraphExists checks if the graph exists in the KV backend.
func checkGraphExists(db kvdb.Backend) (bool, error) {
        // Check if there is even a graph to migrate.
        err := db.View(func(tx kvdb.RTx) error {
                // Check for the existence of the node bucket which is a top
                // level bucket that would have been created on the initial
                // creation of the graph store.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                return nil
        }, func() {})
        if errors.Is(err, ErrGraphNotFound) {
                return false, nil
        } else if err != nil {
                return false, fmt.Errorf("failed to check graph existence: %w",
                        err)
        }

        return true, nil
}

// migrateNodes migrates all nodes from the KV backend to the SQL database.
// This includes doing a sanity check after each migration to ensure that the
// migrated node matches the original node.
func migrateNodes(ctx context.Context, kvBackend kvdb.Backend,
        sqlDB SQLQueries) error {

        // Keep track of the number of nodes migrated and the number of
        // nodes skipped due to errors.
        var (
                count   uint64
                skipped uint64
        )

        // Loop through each node in the KV store and insert it into the SQL
        // database.
        err := forEachNode(kvBackend, func(_ kvdb.RTx,
                node *models.LightningNode) error {

                pub := node.PubKeyBytes

                // Sanity check to ensure that the node has valid extra opaque
                // data. If it does not, we'll skip it. We need to do this
                // because previously we would just persist any TLV bytes that
                // we received without validating them. Now, however, we
                // normalise the storage of extra opaque data, so we need to
                // ensure that the data is valid. We don't want to abort the
                // migration if we encounter a node with invalid extra opaque
                // data, so we'll just skip it and log a warning.
                _, err := marshalExtraOpaqueData(node.ExtraOpaqueData)
                if errors.Is(err, ErrParsingExtraTLVBytes) {
                        skipped++
                        log.Warnf("Skipping migration of node %x with invalid "+
                                "extra opaque data: %v", pub,
                                node.ExtraOpaqueData)

                        return nil
                } else if err != nil {
                        return fmt.Errorf("unable to marshal extra "+
                                "opaque data for node %x: %w", pub, err)
                }

                count++

                // Write the node to the SQL database.
                id, err := upsertNode(ctx, sqlDB, node)
                if err != nil {
                        return fmt.Errorf("could not persist node(%x): %w", pub,
                                err)
                }

                // Fetch it from the SQL store and compare it against the
                // original node object to ensure the migration was successful.
                dbNode, err := sqlDB.GetNodeByPubKey(
                        ctx, sqlc.GetNodeByPubKeyParams{
                                PubKey:  node.PubKeyBytes[:],
                                Version: int16(ProtocolV1),
                        },
                )
                if err != nil {
                        return fmt.Errorf("could not get node by pubkey (%x) "+
                                "after migration: %w", pub, err)
                }

                // Sanity check: ensure the migrated node ID matches the one we
                // just inserted.
                if dbNode.ID != id {
                        return fmt.Errorf("node ID mismatch for node (%x) "+
                                "after migration: expected %d, got %d",
                                pub, id, dbNode.ID)
                }

                migratedNode, err := buildNode(ctx, sqlDB, &dbNode)
                if err != nil {
                        return fmt.Errorf("could not build migrated node "+
                                "from dbNode(db id: %d, node pub: %x): %w",
                                dbNode.ID, pub, err)
                }

                // Make sure that the node addresses are sorted before
                // comparing them to ensure that the order of addresses does
                // not affect the comparison.
                sort.Slice(node.Addresses, func(i, j int) bool {
                        return node.Addresses[i].String() <
                                node.Addresses[j].String()
                })
                sort.Slice(migratedNode.Addresses, func(i, j int) bool {
                        return migratedNode.Addresses[i].String() <
                                migratedNode.Addresses[j].String()
                })

                return compare(node, migratedNode, fmt.Sprintf("node %x", pub))
        })
        if err != nil {
                return fmt.Errorf("could not migrate nodes: %w", err)
        }

        log.Infof("Migrated %d nodes from KV to SQL (skipped %d nodes due to "+
                "invalid TLV streams)", count, skipped)

        return nil
}
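// Editor's note (not part of the original file): the address sorting above is
// needed because compare relies on reflect.DeepEqual, which is order-sensitive
// for slices. A minimal sketch, assuming addrA and addrB are net.Addr values:
//
//        stored := []net.Addr{addrB, addrA}
//        loaded := []net.Addr{addrA, addrB}
//
//        // reflect.DeepEqual(stored, loaded) is false here, even though both
//        // slices hold the same addresses. Sorting both slices by their
//        // String() form makes the comparison order-insensitive:
//        sort.Slice(stored, func(i, j int) bool {
//                return stored[i].String() < stored[j].String()
//        })
//        sort.Slice(loaded, func(i, j int) bool {
//                return loaded[i].String() < loaded[j].String()
//        })
//
//        // reflect.DeepEqual(stored, loaded) is now true.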

// migrateSourceNode migrates the source node from the KV backend to the
// SQL database.
func migrateSourceNode(ctx context.Context, kvdb kvdb.Backend,
        sqlDB SQLQueries) error {

        sourceNode, err := sourceNode(kvdb)
        if errors.Is(err, ErrSourceNodeNotSet) {
                // If the source node has not been set yet, we can skip this
                // migration step.
                return nil
        } else if err != nil {
                return fmt.Errorf("could not get source node from kv "+
                        "store: %w", err)
        }

        pub := sourceNode.PubKeyBytes

        // Get the DB ID of the source node by its public key. This node must
        // already exist in the SQL database, as it should have been migrated
        // in the previous node-migration step.
        id, err := sqlDB.GetNodeIDByPubKey(
                ctx, sqlc.GetNodeIDByPubKeyParams{
                        PubKey:  pub[:],
                        Version: int16(ProtocolV1),
                },
        )
        if err != nil {
                return fmt.Errorf("could not get source node ID: %w", err)
        }

        // Now we can add the source node to the SQL database.
        err = sqlDB.AddSourceNode(ctx, id)
        if err != nil {
                return fmt.Errorf("could not add source node to SQL store: %w",
                        err)
        }

        // Verify that the source node was added correctly by fetching it back
        // from the SQL database and checking that the expected DB ID and
        // pub key are returned. We don't need to do a whole node comparison
        // here, as this was already done in the previous migration step.
        srcNodes, err := sqlDB.GetSourceNodesByVersion(ctx, int16(ProtocolV1))
        if err != nil {
                return fmt.Errorf("could not get source nodes from SQL "+
                        "store: %w", err)
        }

        // The SQL store has support for multiple source nodes (for future
        // protocol versions) but this migration is purely aimed at the V1
        // store, and so we expect exactly one source node to be present.
        if len(srcNodes) != 1 {
                return fmt.Errorf("expected exactly one source node, "+
                        "got %d", len(srcNodes))
        }

        // Check that the source node ID and pub key match the original
        // source node.
        if srcNodes[0].NodeID != id {
                return fmt.Errorf("source node ID mismatch after migration: "+
                        "expected %d, got %d", id, srcNodes[0].NodeID)
        }
        err = compare(pub[:], srcNodes[0].PubKey, "source node")
        if err != nil {
                return fmt.Errorf("source node pubkey mismatch after "+
                        "migration: %w", err)
        }

        log.Infof("Migrated source node with pubkey %x to SQL", pub[:])

        return nil
}

// migrateChannelsAndPolicies migrates all channels and their policies
// from the KV backend to the SQL database.
func migrateChannelsAndPolicies(ctx context.Context, kvBackend kvdb.Backend,
        sqlDB SQLQueries, chain chainhash.Hash) error {

        var (
                channelCount       uint64
                skippedChanCount   uint64
                policyCount        uint64
                skippedPolicyCount uint64
        )
        migChanPolicy := func(policy *models.ChannelEdgePolicy) error {
                // If the policy is nil, we can skip it.
                if policy == nil {
                        return nil
                }

                // Sanity check to ensure that the policy has valid extra opaque
                // data. If it does not, we'll skip it. We need to do this
                // because previously we would just persist any TLV bytes that
                // we received without validating them. Now, however, we
                // normalise the storage of extra opaque data, so we need to
                // ensure that the data is valid. We don't want to abort the
                // migration if we encounter a policy with invalid extra opaque
                // data, so we'll just skip it and log a warning.
                _, err := marshalExtraOpaqueData(policy.ExtraOpaqueData)
                if errors.Is(err, ErrParsingExtraTLVBytes) {
                        skippedPolicyCount++
                        log.Warnf("Skipping policy for channel %d with "+
                                "invalid extra opaque data: %v",
                                policy.ChannelID, policy.ExtraOpaqueData)

                        return nil
                } else if err != nil {
                        return fmt.Errorf("unable to marshal extra opaque "+
                                "data: %w. %+v", err, policy.ExtraOpaqueData)
                }

                policyCount++

                _, _, _, err = updateChanEdgePolicy(ctx, sqlDB, policy)
                if err != nil {
                        return fmt.Errorf("could not migrate channel "+
                                "policy %d: %w", policy.ChannelID, err)
                }

                return nil
        }

        // Iterate over each channel in the KV store and migrate it and its
        // policies to the SQL database.
        err := forEachChannel(kvBackend, func(channel *models.ChannelEdgeInfo,
                policy1 *models.ChannelEdgePolicy,
                policy2 *models.ChannelEdgePolicy) error {

                scid := channel.ChannelID

                // Here, we do a sanity check to ensure that the chain hash of
                // the channel returned by the KV store matches the expected
                // chain hash. This is important since in the SQL store, we will
                // no longer explicitly store the chain hash in the channel
                // info, but rather rely on the chain hash LND is running with.
                // So this is our way of ensuring that LND is running on the
                // correct network at migration time.
                if channel.ChainHash != chain {
                        return fmt.Errorf("channel %d has chain hash %s, "+
                                "expected %s", scid, channel.ChainHash, chain)
                }

                // Sanity check to ensure that the channel has valid extra
                // opaque data. If it does not, we'll skip it. We need to do
                // this because previously we would just persist any TLV bytes
                // that we received without validating them. Now, however, we
                // normalise the storage of extra opaque data, so we need to
                // ensure that the data is valid. We don't want to abort the
                // migration if we encounter a channel with invalid extra opaque
                // data, so we'll just skip it and log a warning.
                _, err := marshalExtraOpaqueData(channel.ExtraOpaqueData)
                if errors.Is(err, ErrParsingExtraTLVBytes) {
                        log.Warnf("Skipping channel %d with invalid "+
                                "extra opaque data: %v", scid,
                                channel.ExtraOpaqueData)

                        skippedChanCount++

                        // If we skip a channel, we also skip its policies.
                        if policy1 != nil {
                                skippedPolicyCount++
                        }
                        if policy2 != nil {
                                skippedPolicyCount++
                        }

                        return nil
                } else if err != nil {
                        return fmt.Errorf("unable to marshal extra opaque "+
                                "data for channel %d: %w %v", scid, err,
                                channel.ExtraOpaqueData)
                }

                channelCount++
                err = migrateSingleChannel(
                        ctx, sqlDB, channel, policy1, policy2, migChanPolicy,
                )
                if err != nil {
                        return fmt.Errorf("could not migrate channel %d: %w",
                                scid, err)
                }

                return nil
        })
        if err != nil {
                return fmt.Errorf("could not migrate channels and policies: %w",
                        err)
        }

        log.Infof("Migrated %d channels and %d policies from KV to SQL "+
                "(skipped %d channels and %d policies due to invalid TLV "+
                "streams)", channelCount, policyCount, skippedChanCount,
                skippedPolicyCount)

        return nil
}

// migrateSingleChannel migrates a single channel and its two policies from the
// KV backend to the SQL database and then reads the channel back from the SQL
// store to verify that the migrated records match the originals.
func migrateSingleChannel(ctx context.Context, sqlDB SQLQueries,
        channel *models.ChannelEdgeInfo,
        policy1, policy2 *models.ChannelEdgePolicy,
        migChanPolicy func(*models.ChannelEdgePolicy) error) error {

        scid := channel.ChannelID

        // First, migrate the channel info along with its policies.
        dbChanInfo, err := insertChannel(ctx, sqlDB, channel)
        if err != nil {
                return fmt.Errorf("could not insert record for channel %d "+
                        "in SQL store: %w", scid, err)
        }

        // Now, migrate the two channel policies.
        err = migChanPolicy(policy1)
        if err != nil {
                return fmt.Errorf("could not migrate policy1(%d): %w", scid,
                        err)
        }
        err = migChanPolicy(policy2)
        if err != nil {
                return fmt.Errorf("could not migrate policy2(%d): %w", scid,
                        err)
        }

        // Now, fetch the channel and its policies from the SQL DB.
        row, err := sqlDB.GetChannelBySCIDWithPolicies(
                ctx, sqlc.GetChannelBySCIDWithPoliciesParams{
                        Scid:    channelIDToBytes(scid),
                        Version: int16(ProtocolV1),
                },
        )
        if err != nil {
                return fmt.Errorf("could not get channel by SCID(%d): %w", scid,
                        err)
        }

        // Assert that the DB IDs for the channel and nodes are as expected
        // given the inserted channel info.
        err = compare(dbChanInfo.channelID, row.Channel.ID, "channel DB ID")
        if err != nil {
                return err
        }
        err = compare(dbChanInfo.node1ID, row.Node.ID, "node1 DB ID")
        if err != nil {
                return err
        }
        err = compare(dbChanInfo.node2ID, row.Node_2.ID, "node2 DB ID")
        if err != nil {
                return err
        }

        migChan, migPol1, migPol2, err := getAndBuildChanAndPolicies(
                ctx, sqlDB, row, channel.ChainHash,
        )
        if err != nil {
                return fmt.Errorf("could not build migrated channel and "+
                        "policies: %w", err)
        }

        // Finally, compare the original channel info and
        // policies with the migrated ones to ensure they match.
        if len(channel.ExtraOpaqueData) == 0 {
                channel.ExtraOpaqueData = nil
        }
        if len(migChan.ExtraOpaqueData) == 0 {
                migChan.ExtraOpaqueData = nil
        }

        err = compare(channel, migChan, fmt.Sprintf("channel %d", scid))
        if err != nil {
                return err
        }

        checkPolicy := func(expPolicy,
                migPolicy *models.ChannelEdgePolicy) error {

                switch {
                // Both policies are nil, nothing to compare.
                case expPolicy == nil && migPolicy == nil:
                        return nil

                // One of the policies is nil, but the other is not.
                case expPolicy == nil || migPolicy == nil:
                        return fmt.Errorf("expected both policies to be "+
                                "non-nil. Got expPolicy: %v, "+
                                "migPolicy: %v", expPolicy, migPolicy)

                // Both policies are non-nil, we can compare them.
                default:
                }

                if len(expPolicy.ExtraOpaqueData) == 0 {
                        expPolicy.ExtraOpaqueData = nil
                }
                if len(migPolicy.ExtraOpaqueData) == 0 {
                        migPolicy.ExtraOpaqueData = nil
                }

                return compare(*expPolicy, *migPolicy, "channel policy")
        }

        err = checkPolicy(policy1, migPol1)
        if err != nil {
                return fmt.Errorf("policy1 mismatch for channel %d: %w", scid,
                        err)
        }

        err = checkPolicy(policy2, migPol2)
        if err != nil {
                return fmt.Errorf("policy2 mismatch for channel %d: %w", scid,
                        err)
        }

        return nil
}
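// Editor's note (not part of the original file): the normalisation of empty
// ExtraOpaqueData slices to nil above matters because compare uses
// reflect.DeepEqual, which treats a nil byte slice and an empty byte slice as
// different values:
//
//        reflect.DeepEqual([]byte(nil), []byte{})    // false
//        reflect.DeepEqual([]byte(nil), []byte(nil)) // true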

// getAndBuildChanAndPolicies builds the channel edge info and the two channel
// policies from the given SQL row so that they can be compared against the
// original KV store records.
func getAndBuildChanAndPolicies(ctx context.Context, db SQLQueries,
        row sqlc.GetChannelBySCIDWithPoliciesRow,
        chain chainhash.Hash) (*models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy, error) {

        node1, node2, err := buildNodeVertices(
                row.Node.PubKey, row.Node_2.PubKey,
        )
        if err != nil {
                return nil, nil, nil, err
        }

        edge, err := getAndBuildEdgeInfo(
                ctx, db, chain, row.Channel.ID, row.Channel, node1, node2,
        )
        if err != nil {
                return nil, nil, nil, fmt.Errorf("unable to build channel "+
                        "info: %w", err)
        }

        dbPol1, dbPol2, err := extractChannelPolicies(row)
        if err != nil {
                return nil, nil, nil, fmt.Errorf("unable to extract channel "+
                        "policies: %w", err)
        }

        policy1, policy2, err := getAndBuildChanPolicies(
                ctx, db, dbPol1, dbPol2, edge.ChannelID, node1, node2,
        )
        if err != nil {
                return nil, nil, nil, fmt.Errorf("unable to build channel "+
                        "policies: %w", err)
        }

        return edge, policy1, policy2, nil
}

// compare checks if the original and migrated objects are equal. If they
// are not, it returns an error with a unified diff of the two objects.
func compare(original, migrated any, identifier string) error {
        if reflect.DeepEqual(original, migrated) {
                return nil
        }

        diff := difflib.UnifiedDiff{
                A:        difflib.SplitLines(spew.Sdump(original)),
                B:        difflib.SplitLines(spew.Sdump(migrated)),
                FromFile: "Expected",
                FromDate: "",
                ToFile:   "Actual",
                ToDate:   "",
                Context:  3,
        }
        diffText, _ := difflib.GetUnifiedDiffString(diff)

        return fmt.Errorf("%w: %s.\n%v", ErrMigrationMismatch, identifier,
                diffText)
}
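// Editor's illustration (not part of the original file): since compare wraps
// ErrMigrationMismatch via %w, callers can detect a mismatch with errors.Is
// while the error text carries the unified diff. The demo type below is
// hypothetical:
//
//        type demo struct{ A int }
//
//        err := compare(demo{A: 1}, demo{A: 2}, "demo record")
//        fmt.Println(errors.Is(err, ErrMigrationMismatch)) // true
//        fmt.Println(err) // prints the identifier and an Expected/Actual diff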