lightningnetwork / lnd, build 16164112462
09 Jul 2025 08:19 AM UTC. Coverage: 57.644% (-0.05% from 57.695%)

Pull Request #10050: [graph mig 2]: graph/db: migrate graph channels and policies from kvdb to SQL
Merge 640caeff3 into fb68f36ac

10 of 223 new or added lines in 2 files covered (4.48%).
20 existing lines in 8 files are now uncovered.
98551 of 170965 relevant lines covered (57.64%).
1.79 hits per line.

Source File: /graph/db/sql_migration.go (coverage: 0.0%)

package graphdb

import (
        "cmp"
        "context"
        "errors"
        "fmt"
        "net"
        "slices"
        "time"

        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/kvdb"
        "github.com/lightningnetwork/lnd/sqldb"
        "github.com/lightningnetwork/lnd/sqldb/sqlc"
)

// MigrateGraphToSQL migrates the graph store from a KV backend to a SQL
// backend.
//
// NOTE: this is currently not called from any code path. It is called via tests
// only for now and will be called from the main lnd binary once the
// migration is fully implemented and tested.
func MigrateGraphToSQL(ctx context.Context, kvBackend kvdb.Backend,
        sqlDB SQLQueries, chain chainhash.Hash) error {

        log.Infof("Starting migration of the graph store from KV to SQL")
        t0 := time.Now()

        // Check if there is a graph to migrate.
        graphExists, err := checkGraphExists(kvBackend)
        if err != nil {
                return fmt.Errorf("failed to check graph existence: %w", err)
        }
        if !graphExists {
                log.Infof("No graph found in KV store, skipping the migration")
                return nil
        }

        // 1) Migrate all the nodes.
        if err := migrateNodes(ctx, kvBackend, sqlDB); err != nil {
                return fmt.Errorf("could not migrate nodes: %w", err)
        }

        // 2) Migrate the source node.
        if err := migrateSourceNode(ctx, kvBackend, sqlDB); err != nil {
                return fmt.Errorf("could not migrate source node: %w", err)
        }

        // 3) Migrate all the channels and channel policies.
        err = migrateChannelsAndPolicies(ctx, kvBackend, sqlDB, chain)
        if err != nil {
                return fmt.Errorf("could not migrate channels and policies: %w",
                        err)
        }

        log.Infof("Finished migration of the graph store from KV to SQL in %v",
                time.Since(t0))

        return nil
}

// checkGraphExists checks if the graph exists in the KV backend.
func checkGraphExists(db kvdb.Backend) (bool, error) {
        // Check if there is even a graph to migrate.
        err := db.View(func(tx kvdb.RTx) error {
                // Check for the existence of the node bucket which is a top
                // level bucket that would have been created on the initial
                // creation of the graph store.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                return nil
        }, func() {})
        if errors.Is(err, ErrGraphNotFound) {
                return false, nil
        } else if err != nil {
                return false, err
        }

        return true, nil
}

// migrateNodes migrates all nodes from the KV backend to the SQL database.
// This includes doing a sanity check after each migration to ensure that the
// migrated node matches the original node.
func migrateNodes(ctx context.Context, kvBackend kvdb.Backend,
        sqlDB SQLQueries) error {

        // Keep track of the number of nodes migrated and the number of
        // nodes skipped due to errors.
        var (
                count   uint64
                skipped uint64
        )

        // Loop through each node in the KV store and insert it into the SQL
        // database.
        err := forEachNode(kvBackend, func(_ kvdb.RTx,
                node *models.LightningNode) error {

                pub := node.PubKeyBytes

                // Sanity check to ensure that the node has valid extra opaque
                // data. If it does not, we'll skip it. We need to do this
                // because previously we would just persist any TLV bytes that
                // we received without validating them. Now, however, we
                // normalise the storage of extra opaque data, so we need to
                // ensure that the data is valid. We don't want to abort the
                // migration if we encounter a node with invalid extra opaque
                // data, so we'll just skip it and log a warning.
                _, err := marshalExtraOpaqueData(node.ExtraOpaqueData)
                if errors.Is(err, ErrParsingExtraTLVBytes) {
                        skipped++
                        log.Warnf("Skipping migration of node %x with invalid "+
                                "extra opaque data: %v", pub,
                                node.ExtraOpaqueData)

                        return nil
                } else if err != nil {
                        return fmt.Errorf("unable to marshal extra "+
                                "opaque data for node %x: %w", pub, err)
                }

                count++

                // TODO(elle): At this point, we should check the loaded node
                // to see if we should extract any DNS addresses from its
                // opaque type addresses. This is expected to be done in:
                // https://github.com/lightningnetwork/lnd/pull/9455.
                // This TODO is being tracked in
                // https://github.com/lightningnetwork/lnd/issues/9795 as this
                // must be addressed before making this code path active in
                // production.

                // Write the node to the SQL database.
                id, err := upsertNode(ctx, sqlDB, node)
                if err != nil {
                        return fmt.Errorf("could not persist node(%x): %w", pub,
                                err)
                }

                // Fetch it from the SQL store and compare it against the
                // original node object to ensure the migration was successful.
                dbNode, err := sqlDB.GetNodeByPubKey(
                        ctx, sqlc.GetNodeByPubKeyParams{
                                PubKey:  node.PubKeyBytes[:],
                                Version: int16(ProtocolV1),
                        },
                )
                if err != nil {
                        return fmt.Errorf("could not get node by pubkey (%x) "+
                                "after migration: %w", pub, err)
                }

                // Sanity check: ensure the migrated node ID matches the one we
                // just inserted.
                if dbNode.ID != id {
                        return fmt.Errorf("node ID mismatch for node (%x) "+
                                "after migration: expected %d, got %d",
                                pub, id, dbNode.ID)
                }

                migratedNode, err := buildNode(ctx, sqlDB, &dbNode)
                if err != nil {
                        return fmt.Errorf("could not build migrated node "+
                                "from dbNode(db id: %d, node pub: %x): %w",
                                dbNode.ID, pub, err)
                }

                // Make sure that the node addresses are sorted before
                // comparing them to ensure that the order of addresses does
                // not affect the comparison.
                slices.SortFunc(node.Addresses, func(i, j net.Addr) int {
                        return cmp.Compare(i.String(), j.String())
                })
                slices.SortFunc(
                        migratedNode.Addresses, func(i, j net.Addr) int {
                                return cmp.Compare(i.String(), j.String())
                        },
                )

                return sqldb.CompareRecords(
                        node, migratedNode, fmt.Sprintf("node %x", pub),
                )
        })
        if err != nil {
                return fmt.Errorf("could not migrate nodes: %w", err)
        }

        log.Infof("Migrated %d nodes from KV to SQL (skipped %d nodes due to "+
                "invalid TLV streams)", count, skipped)

        return nil
}

// migrateSourceNode migrates the source node from the KV backend to the
// SQL database.
func migrateSourceNode(ctx context.Context, kvdb kvdb.Backend,
        sqlDB SQLQueries) error {

        sourceNode, err := sourceNode(kvdb)
        if errors.Is(err, ErrSourceNodeNotSet) {
                // If the source node has not been set yet, we can skip this
                // migration step.
                return nil
        } else if err != nil {
                return fmt.Errorf("could not get source node from kv "+
                        "store: %w", err)
        }

        pub := sourceNode.PubKeyBytes

        // Get the DB ID of the source node by its public key. This node must
        // already exist in the SQL database, as it should have been migrated
        // in the previous node-migration step.
        id, err := sqlDB.GetNodeIDByPubKey(
                ctx, sqlc.GetNodeIDByPubKeyParams{
                        PubKey:  pub[:],
                        Version: int16(ProtocolV1),
                },
        )
        if err != nil {
                return fmt.Errorf("could not get source node ID: %w", err)
        }

        // Now we can add the source node to the SQL database.
        err = sqlDB.AddSourceNode(ctx, id)
        if err != nil {
                return fmt.Errorf("could not add source node to SQL store: %w",
                        err)
        }

        // Verify that the source node was added correctly by fetching it back
        // from the SQL database and checking that the expected DB ID and
        // pub key are returned. We don't need to do a whole node comparison
        // here, as this was already done in the previous migration step.
        srcNodes, err := sqlDB.GetSourceNodesByVersion(ctx, int16(ProtocolV1))
        if err != nil {
                return fmt.Errorf("could not get source nodes from SQL "+
                        "store: %w", err)
        }

        // The SQL store has support for multiple source nodes (for future
        // protocol versions) but this migration is purely aimed at the V1
        // store, and so we expect exactly one source node to be present.
        if len(srcNodes) != 1 {
                return fmt.Errorf("expected exactly one source node, "+
                        "got %d", len(srcNodes))
        }

        // Check that the source node ID and pub key match the original
        // source node.
        if srcNodes[0].NodeID != id {
                return fmt.Errorf("source node ID mismatch after migration: "+
                        "expected %d, got %d", id, srcNodes[0].NodeID)
        }
        err = sqldb.CompareRecords(pub[:], srcNodes[0].PubKey, "source node")
        if err != nil {
                return fmt.Errorf("source node pubkey mismatch after "+
                        "migration: %w", err)
        }

        log.Infof("Migrated source node with pubkey %x to SQL", pub[:])

        return nil
}

// migrateChannelsAndPolicies migrates all channels and their policies
273
// from the KV backend to the SQL database.
274
func migrateChannelsAndPolicies(ctx context.Context, kvBackend kvdb.Backend,
NEW
275
        sqlDB SQLQueries, chain chainhash.Hash) error {
×
NEW
276

×
NEW
277
        var (
×
NEW
278
                channelCount       uint64
×
NEW
279
                skippedChanCount   uint64
×
NEW
280
                policyCount        uint64
×
NEW
281
                skippedPolicyCount uint64
×
NEW
282
        )
×
NEW
283
        migChanPolicy := func(policy *models.ChannelEdgePolicy) error {
×
NEW
284
                // If the policy is nil, we can skip it.
×
NEW
285
                if policy == nil {
×
NEW
286
                        return nil
×
NEW
287
                }
×
288

289
                // Unlike the special case of invalid TLV bytes for node and
290
                // channel announcements, we don't need to handle the case for
291
                // channel policies here because it is already handled in the
292
                // `forEachChannel` function. If the policy has invalid TLV
293
                // bytes, then `nil` will be passed to this function.
294

NEW
295
                policyCount++
×
NEW
296

×
NEW
297
                _, _, _, err := updateChanEdgePolicy(ctx, sqlDB, policy)
×
NEW
298
                if err != nil {
×
NEW
299
                        return fmt.Errorf("could not migrate channel "+
×
NEW
300
                                "policy %d: %w", policy.ChannelID, err)
×
NEW
301
                }
×
302

NEW
303
                return nil
×
304
        }
305

306
        // Iterate over each channel in the KV store and migrate it and its
307
        // policies to the SQL database.
NEW
308
        err := forEachChannel(kvBackend, func(channel *models.ChannelEdgeInfo,
×
NEW
309
                policy1 *models.ChannelEdgePolicy,
×
NEW
310
                policy2 *models.ChannelEdgePolicy) error {
×
NEW
311

×
NEW
312
                scid := channel.ChannelID
×
NEW
313

×
NEW
314
                // Here, we do a sanity check to ensure that the chain hash of
×
NEW
315
                // the channel returned by the KV store matches the expected
×
NEW
316
                // chain hash. This is important since in the SQL store, we will
×
NEW
317
                // no longer explicitly store the chain hash in the channel
×
NEW
318
                // info, but rather rely on the chain hash LND is running with.
×
NEW
319
                // So this is our way of ensuring that LND is running on the
×
NEW
320
                // correct network at migration time.
×
NEW
321
                if channel.ChainHash != chain {
×
NEW
322
                        return fmt.Errorf("channel %d has chain hash %s, "+
×
NEW
323
                                "expected %s", scid, channel.ChainHash, chain)
×
NEW
324
                }
×
325

326
                // Sanity check to ensure that the channel has valid extra
327
                // opaque data. If it does not, we'll skip it. We need to do
328
                // this because previously we would just persist any TLV bytes
329
                // that we received without validating them. Now, however, we
330
                // normalise the storage of extra opaque data, so we need to
331
                // ensure that the data is valid. We don't want to abort the
332
                // migration if we encounter a channel with invalid extra opaque
333
                // data, so we'll just skip it and log a warning.
NEW
334
                _, err := marshalExtraOpaqueData(channel.ExtraOpaqueData)
×
NEW
335
                if errors.Is(err, ErrParsingExtraTLVBytes) {
×
NEW
336
                        log.Warnf("Skipping channel %d with invalid "+
×
NEW
337
                                "extra opaque data: %v", scid,
×
NEW
338
                                channel.ExtraOpaqueData)
×
NEW
339

×
NEW
340
                        skippedChanCount++
×
NEW
341

×
NEW
342
                        // If we skip a channel, we also skip its policies.
×
NEW
343
                        if policy1 != nil {
×
NEW
344
                                skippedPolicyCount++
×
NEW
345
                        }
×
NEW
346
                        if policy2 != nil {
×
NEW
347
                                skippedPolicyCount++
×
NEW
348
                        }
×
349

NEW
350
                        return nil
×
NEW
351
                } else if err != nil {
×
NEW
352
                        return fmt.Errorf("unable to marshal extra opaque "+
×
NEW
353
                                "data for channel %d (%v): %w", scid,
×
NEW
354
                                channel.ExtraOpaqueData, err)
×
NEW
355
                }
×
356

NEW
357
                channelCount++
×
NEW
358
                err = migrateSingleChannel(
×
NEW
359
                        ctx, sqlDB, channel, policy1, policy2, migChanPolicy,
×
NEW
360
                )
×
NEW
361
                if err != nil {
×
NEW
362
                        return fmt.Errorf("could not migrate channel %d: %w",
×
NEW
363
                                scid, err)
×
NEW
364
                }
×
365

NEW
366
                return nil
×
367
        })
NEW
368
        if err != nil {
×
NEW
369
                return fmt.Errorf("could not migrate channels and policies: %w",
×
NEW
370
                        err)
×
NEW
371
        }
×
372

NEW
373
        log.Infof("Migrated %d channels and %d policies from KV to SQL "+
×
NEW
374
                "(skipped %d channels and %d policies due to invalid TLV "+
×
NEW
375
                "streams)", channelCount, policyCount, skippedChanCount,
×
NEW
376
                skippedPolicyCount)
×
NEW
377

×
NEW
378
        return nil
×
379
}
380

// migrateSingleChannel migrates a single channel and its two channel policies
// from the KV backend to the SQL database, and then reads the records back
// from the SQL store to verify that the migrated data matches the original.
func migrateSingleChannel(ctx context.Context, sqlDB SQLQueries,
        channel *models.ChannelEdgeInfo,
        policy1, policy2 *models.ChannelEdgePolicy,
        migChanPolicy func(*models.ChannelEdgePolicy) error) error {

        scid := channel.ChannelID

        // First, migrate the channel info along with its policies.
        dbChanInfo, err := insertChannel(ctx, sqlDB, channel)
        if err != nil {
                return fmt.Errorf("could not insert record for channel %d "+
                        "in SQL store: %w", scid, err)
        }

        // Now, migrate the two channel policies.
        err = migChanPolicy(policy1)
        if err != nil {
                return fmt.Errorf("could not migrate policy1(%d): %w", scid,
                        err)
        }
        err = migChanPolicy(policy2)
        if err != nil {
                return fmt.Errorf("could not migrate policy2(%d): %w", scid,
                        err)
        }

        // Now, fetch the channel and its policies from the SQL DB.
        row, err := sqlDB.GetChannelBySCIDWithPolicies(
                ctx, sqlc.GetChannelBySCIDWithPoliciesParams{
                        Scid:    channelIDToBytes(scid),
                        Version: int16(ProtocolV1),
                },
        )
        if err != nil {
                return fmt.Errorf("could not get channel by SCID(%d): %w", scid,
                        err)
        }

        // Assert that the DB IDs for the channel and nodes are as expected
        // given the inserted channel info.
        err = sqldb.CompareRecords(
                dbChanInfo.channelID, row.Channel.ID, "channel DB ID",
        )
        if err != nil {
                return err
        }
        err = sqldb.CompareRecords(
                dbChanInfo.node1ID, row.Node.ID, "node1 DB ID",
        )
        if err != nil {
                return err
        }
        err = sqldb.CompareRecords(
                dbChanInfo.node2ID, row.Node_2.ID, "node2 DB ID",
        )
        if err != nil {
                return err
        }

        migChan, migPol1, migPol2, err := getAndBuildChanAndPolicies(
                ctx, sqlDB, row, channel.ChainHash,
        )
        if err != nil {
                return fmt.Errorf("could not build migrated channel and "+
                        "policies: %w", err)
        }

        // Finally, compare the original channel info and
        // policies with the migrated ones to ensure they match.
        if len(channel.ExtraOpaqueData) == 0 {
                channel.ExtraOpaqueData = nil
        }
        if len(migChan.ExtraOpaqueData) == 0 {
                migChan.ExtraOpaqueData = nil
        }

        err = sqldb.CompareRecords(
                channel, migChan, fmt.Sprintf("channel %d", scid),
        )
        if err != nil {
                return err
        }

        checkPolicy := func(expPolicy,
                migPolicy *models.ChannelEdgePolicy) error {

                switch {
                // Both policies are nil, nothing to compare.
                case expPolicy == nil && migPolicy == nil:
                        return nil

                // One of the policies is nil, but the other is not.
                case expPolicy == nil || migPolicy == nil:
                        return fmt.Errorf("expected both policies to be "+
                                "non-nil. Got expPolicy: %v, "+
                                "migPolicy: %v", expPolicy, migPolicy)

                // Both policies are non-nil, we can compare them.
                default:
                }

                if len(expPolicy.ExtraOpaqueData) == 0 {
                        expPolicy.ExtraOpaqueData = nil
                }
                if len(migPolicy.ExtraOpaqueData) == 0 {
                        migPolicy.ExtraOpaqueData = nil
                }

                return sqldb.CompareRecords(
                        *expPolicy, *migPolicy, "channel policy",
                )
        }

        err = checkPolicy(policy1, migPol1)
        if err != nil {
                return fmt.Errorf("policy1 mismatch for channel %d: %w", scid,
                        err)
        }

        err = checkPolicy(policy2, migPol2)
        if err != nil {
                return fmt.Errorf("policy2 mismatch for channel %d: %w", scid,
                        err)
        }

        return nil
}

// getAndBuildChanAndPolicies fetches the node, channel and policy records of
// the given channel row from the SQL store and rebuilds the in-memory edge
// info and policy models from them.
func getAndBuildChanAndPolicies(ctx context.Context, db SQLQueries,
        row sqlc.GetChannelBySCIDWithPoliciesRow,
        chain chainhash.Hash) (*models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy, error) {

        node1, node2, err := buildNodeVertices(
                row.Node.PubKey, row.Node_2.PubKey,
        )
        if err != nil {
                return nil, nil, nil, err
        }

        edge, err := getAndBuildEdgeInfo(
                ctx, db, chain, row.Channel.ID, row.Channel, node1, node2,
        )
        if err != nil {
                return nil, nil, nil, fmt.Errorf("unable to build channel "+
                        "info: %w", err)
        }

        dbPol1, dbPol2, err := extractChannelPolicies(row)
        if err != nil {
                return nil, nil, nil, fmt.Errorf("unable to extract channel "+
                        "policies: %w", err)
        }

        policy1, policy2, err := getAndBuildChanPolicies(
                ctx, db, dbPol1, dbPol2, edge.ChannelID, node1, node2,
        )
        if err != nil {
                return nil, nil, nil, fmt.Errorf("unable to build channel "+
                        "policies: %w", err)
        }

        return edge, policy1, policy2, nil
}
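
As the NOTE on MigrateGraphToSQL states, the function is currently only exercised by tests. The following is a minimal sketch of how a caller might eventually invoke it, assuming it already holds an open kvdb.Backend for the existing graph store and a SQLQueries implementation backed by the target SQL database; the wrapper runGraphMigration and the mainnet genesis-hash default are illustrative only and are not part of sql_migration.go or of lnd.

package example

import (
        "context"
        "fmt"

        "github.com/btcsuite/btcd/chaincfg"
        graphdb "github.com/lightningnetwork/lnd/graph/db"
        "github.com/lightningnetwork/lnd/kvdb"
)

// runGraphMigration is a hypothetical wrapper, not part of lnd: the caller
// supplies an open kvdb.Backend holding the existing graph store and a
// graphdb.SQLQueries implementation backed by the target SQL database.
func runGraphMigration(ctx context.Context, kvBackend kvdb.Backend,
        sqlDB graphdb.SQLQueries) error {

        // The chain hash lnd is running with; mainnet is assumed here purely
        // for illustration. MigrateGraphToSQL compares it against the chain
        // hash stored with each channel as a network sanity check.
        chain := *chaincfg.MainNetParams.GenesisHash

        err := graphdb.MigrateGraphToSQL(ctx, kvBackend, sqlDB, chain)
        if err != nil {
                return fmt.Errorf("graph migration failed: %w", err)
        }

        return nil
}

In lnd itself this call would presumably run inside the store's own transactional wrappers and use the configured network rather than a hard-coded one; the sketch leaves those details out.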