
lightningnetwork / lnd, build 14881819500
07 May 2025 11:06AM UTC. Coverage: 58.617%. First build.

Pull Request #9791: graph/db: introduce a `V1Store` interface
Merge 8fdf7d431 into 695cf7c4a (github web-flow)

19 of 36 new or added lines in 4 files covered. (52.78%)
97158 of 165750 relevant lines covered (58.62%)
1.82 hits per line

Source File: /graph/db/graph.go (66.76% covered)

package graphdb

import (
        "errors"
        "fmt"
        "sync"
        "sync/atomic"
        "time"

        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/wire"
        "github.com/lightningnetwork/lnd/batch"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
)

// ErrChanGraphShuttingDown indicates that the ChannelGraph has shut down or is
// busy shutting down.
var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down")

// ChannelGraph is a layer above the graph's CRUD layer.
//
// NOTE: currently, this is purely a pass-through layer directly to the backing
// KVStore. Upcoming commits will move the graph cache out of the KVStore and
// into this layer so that the KVStore is only responsible for CRUD operations.
type ChannelGraph struct {
        started atomic.Bool
        stopped atomic.Bool

        // cacheMu guards any writes to the graphCache. It should be held
        // across the DB write call and the graphCache update to make the
        // two updates as atomic as possible.
        cacheMu sync.Mutex

        graphCache *GraphCache

        V1Store
        *topologyManager

        quit chan struct{}
        wg   sync.WaitGroup
}

// NewChannelGraph creates a new ChannelGraph instance with the given backend.
func NewChannelGraph(v1Store V1Store,
        options ...ChanGraphOption) (*ChannelGraph, error) {

        opts := defaultChanGraphOptions()
        for _, o := range options {
                o(opts)
        }

        g := &ChannelGraph{
                V1Store:         v1Store,
                topologyManager: newTopologyManager(),
                quit:            make(chan struct{}),
        }

        // The graph cache can be turned off (e.g. for mobile users) for a
        // speed/memory usage tradeoff.
        if opts.useGraphCache {
                g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes)
        }

        return g, nil
}

// Start kicks off any goroutines required for the ChannelGraph to function.
// If the graph cache is enabled, then it will be populated with the contents of
// the database.
func (c *ChannelGraph) Start() error {
        if !c.started.CompareAndSwap(false, true) {
                return nil
        }
        log.Debugf("ChannelGraph starting")
        defer log.Debug("ChannelGraph started")

        if c.graphCache != nil {
                if err := c.populateCache(); err != nil {
                        return fmt.Errorf("could not populate the graph "+
                                "cache: %w", err)
                }
        }

        c.wg.Add(1)
        go c.handleTopologySubscriptions()

        return nil
}

// Stop signals any active goroutines for a graceful closure.
func (c *ChannelGraph) Stop() error {
        if !c.stopped.CompareAndSwap(false, true) {
                return nil
        }

        log.Debugf("ChannelGraph shutting down...")
        defer log.Debug("ChannelGraph shutdown complete")

        close(c.quit)
        c.wg.Wait()

        return nil
}
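
// Example (illustrative sketch, not part of the upstream file): a typical
// lifecycle for a ChannelGraph built on top of an existing V1Store
// implementation. The helper name and the elided error handling are
// assumptions for illustration; the calls themselves are just the constructor
// and the Start/Stop methods defined above.
func exampleChannelGraphLifecycle(store V1Store) error {
        graph, err := NewChannelGraph(store)
        if err != nil {
                return err
        }

        // Start populates the in-memory graph cache (when enabled) and
        // launches the topology subscription handler goroutine.
        if err := graph.Start(); err != nil {
                return err
        }

        // Stop closes the quit channel and waits for all goroutines to exit.
        defer func() {
                _ = graph.Stop()
        }()

        return nil
}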

// handleTopologySubscriptions ensures that topology client subscriptions,
// subscription cancellations and topology notifications are handled
// synchronously.
//
// NOTE: this MUST be run in a goroutine.
func (c *ChannelGraph) handleTopologySubscriptions() {
        defer c.wg.Done()

        for {
                select {
                // A new fully validated topology update has just arrived.
                // We'll notify any registered clients.
                case update := <-c.topologyUpdate:
                        // TODO(elle): change topology handling to be handled
                        // synchronously so that we can guarantee the order of
                        // notification delivery.
                        c.wg.Add(1)
                        go c.handleTopologyUpdate(update)

                        // TODO(roasbeef): remove all unconnected vertexes
                        // after N blocks pass with no corresponding
                        // announcements.

                // A new notification client update has arrived. We're either
                // gaining a new client, or cancelling notifications for an
                // existing client.
                case ntfnUpdate := <-c.ntfnClientUpdates:
                        clientID := ntfnUpdate.clientID

                        if ntfnUpdate.cancel {
                                client, ok := c.topologyClients.LoadAndDelete(
                                        clientID,
                                )
                                if ok {
                                        close(client.exit)
                                        client.wg.Wait()

                                        close(client.ntfnChan)
                                }

                                continue
                        }

                        c.topologyClients.Store(clientID, &topologyClient{
                                ntfnChan: ntfnUpdate.ntfnChan,
                                exit:     make(chan struct{}),
                        })

                case <-c.quit:
                        return
                }
        }
}

// populateCache loads the entire channel graph into the in-memory graph cache.
//
// NOTE: This should only be called if the graphCache has been constructed.
func (c *ChannelGraph) populateCache() error {
        startTime := time.Now()
        log.Info("Populating in-memory channel graph, this might take a " +
                "while...")

        err := c.V1Store.ForEachNodeCacheable(func(node route.Vertex,
                features *lnwire.FeatureVector) error {

                c.graphCache.AddNodeFeatures(node, features)

                return nil
        })
        if err != nil {
                return err
        }

        err = c.V1Store.ForEachChannel(func(info *models.ChannelEdgeInfo,
                policy1, policy2 *models.ChannelEdgePolicy) error {

                c.graphCache.AddChannel(info, policy1, policy2)

                return nil
        })
        if err != nil {
                return err
        }

        log.Infof("Finished populating in-memory channel graph (took %v, %s)",
                time.Since(startTime), c.graphCache.Stats())

        return nil
}

// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. If the graphCache
// is available, then it will be used to retrieve the node's channels instead
// of the database.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex,
        cb func(channel *DirectedChannel) error) error {

        if c.graphCache != nil {
                return c.graphCache.ForEachChannel(node, cb)
        }

        return c.V1Store.ForEachNodeDirectedChannel(node, cb)
}
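
// Example (illustrative sketch, not part of the upstream file): counting a
// node's channels with ForEachNodeDirectedChannel. Returning a non-nil error
// from the callback would halt the iteration early; the helper name here is
// hypothetical.
func exampleCountNodeChannels(g *ChannelGraph, node route.Vertex) (int, error) {
        var numChannels int

        err := g.ForEachNodeDirectedChannel(node,
                func(_ *DirectedChannel) error {
                        numChannels++
                        return nil
                },
        )
        if err != nil {
                return 0, err
        }

        return numChannels, nil
}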

// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
// If the graphCache is available, then it will be used to retrieve the node's
// features instead of the database.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) (
        *lnwire.FeatureVector, error) {

        if c.graphCache != nil {
                return c.graphCache.GetFeatures(node), nil
        }

        return c.V1Store.FetchNodeFeatures(node)
}

// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph. If
// the graph cache is not enabled, then the call-back will be provided with
// access to the graph via a consistent read-only transaction.
func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error {
        if c.graphCache != nil {
                return cb(c)
        }

        return c.V1Store.GraphSession(cb)
}
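
// Example (illustrative sketch, not part of the upstream file): running a few
// related read-only queries through GraphSession so that, when the graph
// cache is disabled, they share a single consistent read transaction. Only
// the NodeTraverser methods shown in this file are used; the helper name is
// hypothetical.
func exampleGraphQueries(g *ChannelGraph, node route.Vertex) error {
        return g.GraphSession(func(graph NodeTraverser) error {
                // Fetch the node's advertised feature vector.
                features, err := graph.FetchNodeFeatures(node)
                if err != nil {
                        return err
                }
                _ = features

                // Walk the node's channels within the same session.
                return graph.ForEachNodeDirectedChannel(node,
                        func(_ *DirectedChannel) error {
                                return nil
                        },
                )
        })
}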

// ForEachNodeCached iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered.
//
// NOTE: The callback contents MUST not be modified.
func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex,
        chans map[uint64]*DirectedChannel) error) error {

        if c.graphCache != nil {
                return c.graphCache.ForEachNode(cb)
        }

        return c.V1Store.ForEachNodeCached(cb)
}

// AddLightningNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
func (c *ChannelGraph) AddLightningNode(node *models.LightningNode,
        op ...batch.SchedulerOption) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.AddLightningNode(node, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.AddNodeFeatures(
                        node.PubKeyBytes, node.Features,
                )
        }

        select {
        case c.topologyUpdate <- node:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}

// DeleteLightningNode starts a new database transaction to remove a vertex/node
// from the database according to the node's public key.
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.DeleteLightningNode(nodePub)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.RemoveNode(nodePub)
        }

        return nil
}

// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge between the two target nodes is created. The information
// stored denotes the static attributes of the channel, such as the channelID,
// the keys involved in creation of the channel, and the set of features that
// the channel supports. The chanPoint and chanID are used to uniquely identify
// the edge globally within the database.
func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo,
        op ...batch.SchedulerOption) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.AddChannelEdge(edge, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.AddChannel(edge, nil, nil)
        }

        select {
        case c.topologyUpdate <- edge:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
// If the cache is enabled, the edge will be added back to the graph cache if
// we still have a record of this channel in the DB.
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.MarkEdgeLive(chanID)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                // We need to add the channel back into our graph cache,
                // otherwise we won't use it for path finding.
                infos, err := c.V1Store.FetchChanInfos([]uint64{chanID})
                if err != nil {
                        return err
                }

                if len(infos) == 0 {
                        return nil
                }

                info := infos[0]

                c.graphCache.AddChannel(info.Info, info.Policy1, info.Policy2)
        }

        return nil
}

// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether to mark the channel as a zombie.
func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool,
        chanIDs ...uint64) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        infos, err := c.V1Store.DeleteChannelEdges(
                strictZombiePruning, markZombie, chanIDs...,
        )
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                for _, info := range infos {
                        c.graphCache.RemoveChannel(
                                info.NodeKey1Bytes, info.NodeKey2Bytes,
                                info.ChannelID,
                        )
                }
        }

        return err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
        []*models.ChannelEdgeInfo, error) {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        edges, err := c.V1Store.DisconnectBlockAtHeight(height)
        if err != nil {
                return nil, err
        }

        if c.graphCache != nil {
                for _, edge := range edges {
                        c.graphCache.RemoveChannel(
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
                                edge.ChannelID,
                        )
                }
        }

        return edges, nil
}

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block are returned if the function succeeds without error.
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) (
        []*models.ChannelEdgeInfo, error) {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        edges, nodes, err := c.V1Store.PruneGraph(
                spentOutputs, blockHash, blockHeight,
        )
        if err != nil {
                return nil, err
        }

        if c.graphCache != nil {
                for _, edge := range edges {
                        c.graphCache.RemoveChannel(
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
                                edge.ChannelID,
                        )
                }

                for _, node := range nodes {
                        c.graphCache.RemoveNode(node)
                }

                log.Debugf("Pruned graph, cache now has %s",
                        c.graphCache.Stats())
        }

        if len(edges) != 0 {
                // Notify all currently registered clients of the newly closed
                // channels.
                closeSummaries := createCloseSummaries(
                        blockHeight, edges...,
                )
                c.notifyTopologyChange(&TopologyChange{
                        ClosedChannels: closeSummaries,
                })
        }

        return edges, nil
}

// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This ensures
// that we only maintain a graph of reachable nodes. In the event that a pruned
// node gains more channels, it will be re-added back to the graph.
func (c *ChannelGraph) PruneGraphNodes() error {
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        nodes, err := c.V1Store.PruneGraphNodes()
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                for _, node := range nodes {
                        c.graphCache.RemoveNode(node)
                }
        }

        return nil
}

// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// IDs that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan IDs and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't.
func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
        isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {

        unknown, knownZombies, err := c.V1Store.FilterKnownChanIDs(chansInfo)
        if err != nil {
                return nil, err
        }

        for _, info := range knownZombies {
                // TODO(ziggie): Make sure that for the strict pruning case we
                // compare the pubkeys and whether the right timestamp is not
                // older than the `ChannelPruneExpiry`.
                //
                // NOTE: The timestamp data has no verification attached to it
                // in the `ReplyChannelRange` msg so we are trusting this data
                // at this point. However it is not critical because we are just
                // removing the channel from the db when the timestamps are more
                // recent. During the querying of the gossip msg verification
                // happens as usual. However we should start punishing peers
                // when they don't provide us honest data?
                isStillZombie := isZombieChan(
                        info.Node1UpdateTimestamp, info.Node2UpdateTimestamp,
                )

                if isStillZombie {
                        continue
                }

                // If we have marked it as a zombie but the latest update
                // timestamps could bring it back from the dead, then we mark it
                // alive, and we let it be added to the set of IDs to query our
                // peer for.
                err := c.V1Store.MarkEdgeLive(
                        info.ShortChannelID.ToUint64(),
                )
                // Since there is a chance that the edge could have been marked
                // as "live" between the FilterKnownChanIDs call and the
                // MarkEdgeLive call, we ignore the error if the edge is already
                // marked as live.
                if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) {
                        return nil, err
                }
        }

        return unknown, nil
}
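
// Example (illustrative sketch, not part of the upstream file): one possible
// isZombieChan callback for FilterKnownChanIDs. It keeps a channel in the
// zombie set only while both policy update timestamps are older than a
// caller-chosen expiry. The actual policy used by lnd's gossip syncer lives
// outside this file, so both the helper name and the expiry rule are
// assumptions for illustration.
func exampleIsZombieChan(pruneExpiry time.Duration) func(time.Time, time.Time) bool {
        return func(node1Update, node2Update time.Time) bool {
                return time.Since(node1Update) > pruneExpiry &&
                        time.Since(node2Update) > pruneExpiry
        }
}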

// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
        pubKey1, pubKey2 [33]byte) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.MarkEdgeZombie(chanID, pubKey1, pubKey2)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
        }

        return nil
}

// UpdateEdgePolicy updates the edge routing policy for a single directed edge
// within the database for the referenced channel. The `flags` attribute within
// the ChannelEdgePolicy determines which of the directed edges are being
// updated. If the flag is 1, then the first node's information is being
// updated, otherwise it's the second node's information. The node ordering is
// determined by the lexicographical ordering of the identity public keys of the
// nodes on either side of the channel.
func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
        op ...batch.SchedulerOption) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        from, to, err := c.V1Store.UpdateEdgePolicy(edge, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                var isUpdate1 bool
                if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
                        isUpdate1 = true
                }

                c.graphCache.UpdatePolicy(edge, from, to, isUpdate1)
        }

        select {
        case c.topologyUpdate <- edge:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}
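
// Example (illustrative sketch, not part of the upstream file): the direction
// bit of ChannelFlags is what UpdateEdgePolicy above uses to decide whether an
// update applies to the first node (isUpdate1). This hypothetical helper just
// mirrors that check.
func exampleIsFirstNodeUpdate(edge *models.ChannelEdgePolicy) bool {
        return edge.ChannelFlags&lnwire.ChanUpdateDirection == 0
}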