
input-output-hk / catalyst-libs / 16137717588

08 Jul 2025 08:12AM UTC coverage: 68.653%. First build 16137717588

Pull Request #406: feat(rust/signed-doc): Cleanup `DocType`, `DocumentRefs` related test cases
Merge 966560951 into 0d4c348fe

185 of 189 new or added lines in 5 files covered. (97.88%)

12733 of 18547 relevant lines covered (68.65%)

2244.16 hits per line

Source File

0.0% covered
/rust/cardano-chain-follower/src/chain_sync.rs
//! Sync from the chain to an in-memory buffer.
//!
//! All iteration of the chain is done through this buffer or a mithril snapshot.
//! Consumers of this library do not talk to the node directly.

use std::time::Duration;

use anyhow::Context;
use cardano_blockchain_types::{Fork, MultiEraBlock, Network, Point};
use pallas::{
    ledger::traverse::MultiEraHeader,
    network::{
        facades::PeerClient,
        miniprotocols::chainsync::{self, HeaderContent, Tip},
    },
};
use tokio::{
    spawn,
    sync::mpsc,
    time::{sleep, timeout},
};
use tracing::{debug, error};

use crate::{
    chain_sync_live_chains::{
        get_fill_to_point, get_intersect_points, get_live_block, get_live_head_point, get_peer_tip,
        live_chain_add_block_to_tip, live_chain_backfill, live_chain_length, purge_live_chain,
    },
    chain_sync_ready::{
        get_chain_update_tx_queue, notify_follower, wait_for_sync_ready, SyncReadyWaiter,
    },
    chain_update,
    error::{Error, Result},
    mithril_snapshot_config::MithrilUpdateMessage,
    mithril_snapshot_data::latest_mithril_snapshot_id,
    stats::{self},
    ChainSyncConfig,
};

/// The maximum number of seconds we wait for a node to connect.
const MAX_NODE_CONNECT_TIME_SECS: u64 = 2;

/// The maximum number of times we retry connecting to a node.
/// Currently set to a maximum of 5 retries.
const MAX_NODE_CONNECT_RETRIES: u64 = 5;

/// Try and connect to a node, in a robust and quick way.
///
/// If a connection attempt takes longer than `MAX_NODE_CONNECT_TIME_SECS` seconds, retry.
/// Retry up to `MAX_NODE_CONNECT_RETRIES` times before giving up.
async fn retry_connect(
    addr: &str, magic: u64,
) -> std::result::Result<PeerClient, pallas::network::facades::Error> {
    let mut retries = MAX_NODE_CONNECT_RETRIES;
    loop {
        match timeout(
            Duration::from_secs(MAX_NODE_CONNECT_TIME_SECS),
            PeerClient::connect(addr, magic),
        )
        .await
        {
            Ok(peer) => {
                match peer {
                    Ok(peer) => return Ok(peer),
                    Err(err) => {
                        retries = retries.saturating_sub(1);
                        if retries == 0 {
                            return Err(err);
                        }
                        debug!("retrying {retries} connect to {addr} : {err:?}");
                    },
                }
            },
            Err(error) => {
                retries = retries.saturating_sub(1);
                if retries == 0 {
                    return Err(pallas::network::facades::Error::ConnectFailure(
                        tokio::io::Error::new(
                            tokio::io::ErrorKind::Other,
                            format!("failed to connect to {addr} : {error}"),
                        ),
                    ));
                }
                debug!("retrying {retries} connect to {addr} : {error:?}");
            },
        }
    }
}

/// Purge the live chain, and intersect with TIP.
async fn purge_and_intersect_tip(client: &mut PeerClient, chain: Network) -> Result<Point> {
    if let Err(error) = purge_live_chain(chain, &Point::TIP) {
        // Shouldn't happen.
        error!("failed to purge live chain: {error}");
    }

    client
        .chainsync()
        .intersect_tip()
        .await
        .map_err(Error::Chainsync)
        .map(std::convert::Into::into)
}

/// Resynchronize to the live tip in memory.
async fn resync_live_tip(client: &mut PeerClient, chain: Network) -> Result<Point> {
    let sync_points = get_intersect_points(chain);
    if sync_points.is_empty() {
        return purge_and_intersect_tip(client, chain).await;
    }

    let sync_to_point = match client.chainsync().find_intersect(sync_points).await {
        Ok((Some(point), _)) => point.into(),
        Ok((None, _)) => {
            // No intersection found, so purge live chain and re-sync it.
            return purge_and_intersect_tip(client, chain).await;
        },
        Err(error) => return Err(Error::Chainsync(error)),
    };

    Ok(sync_to_point)
}

/// Fetch a single block from the Peer, and decode it.
async fn fetch_block_from_peer(
    peer: &mut PeerClient, chain: Network, point: Point, previous_point: Point, fork_count: Fork,
) -> anyhow::Result<MultiEraBlock> {
    let block_data = peer
        .blockfetch()
        .fetch_single(point.clone().into())
        .await
        .with_context(|| "Fetching block data")?;

    debug!("{chain}, {previous_point}, {fork_count:?}");
    let live_block_data = MultiEraBlock::new(chain, block_data, &previous_point, fork_count)?;

    Ok(live_block_data)
}

/// Process a rollback.
///
/// Fetch the rollback block, and try and insert it into the live-chain.
/// If it's a real rollback, it will purge the chain ahead of the block automatically.
async fn process_rollback_actual(
    peer: &mut PeerClient, chain: Network, point: Point, tip: &Tip, fork_count: &mut Fork,
) -> anyhow::Result<Point> {
    debug!("RollBackward: {:?} {:?}", point, tip);

    // Check if the block is in the live chain. If it is, re-add it, which auto-purges the
    // rest of the live chain tip, and increments the fork count.
    if let Some(mut block) = get_live_block(chain, &point, 0, true) {
        // Even though we are re-adding the known block, increase the fork count.
        block.set_fork(*fork_count);
        live_chain_add_block_to_tip(chain, block, fork_count, tip.0.clone().into())?;
        return Ok(point);
    }

    // If the block is NOT in the chain, fetch it, and insert it, which will automatically
    // find the correct place to insert it, and purge the old tip blocks.

    // We don't know what or if there is a previous block, so probe for it.
    // Fuzzy search for the block immediately preceding the block we will fetch.
    // In case we don't have a previous point on the live chain, it might be the tip of the
    // mithril chain, so get that.
    let previous_block = get_live_block(chain, &point, -1, false);
    let previous_point = if let Some(previous_block) = previous_block {
        let previous = previous_block.previous();
        debug!("Previous block: {:?}", previous);
        if previous == Point::ORIGIN {
            latest_mithril_snapshot_id(chain).tip()
        } else {
            previous
        }
    } else {
        debug!("Using Mithril Tip as rollback previous point.");
        latest_mithril_snapshot_id(chain).tip()
    };
    debug!("Previous point: {:?}", previous_point);
    let block =
        fetch_block_from_peer(peer, chain, point.clone(), previous_point, *fork_count).await?;
    live_chain_add_block_to_tip(chain, block, fork_count, tip.0.clone().into())?;

    // Next block we receive is a rollback.
    Ok(point)
}

/// Process a rollback detected from the peer.
async fn process_rollback(
    peer: &mut PeerClient, chain: Network, point: Point, tip: &Tip, previous_point: &Point,
    fork_count: &mut Fork,
) -> anyhow::Result<Point> {
    let rollback_slot = point.slot_or_default();
    let head_slot = previous_point.slot_or_default();
    debug!("Head slot: {head_slot:?}");
    debug!("Rollback slot: {rollback_slot:?}");
    // This is ok because slots implement saturating subtraction.
    #[allow(clippy::arithmetic_side_effects)]
    let slot_rollback_size = head_slot - rollback_slot;

    // We actually do the work here...
    let response = process_rollback_actual(peer, chain, point, tip, fork_count).await?;

    // We never really know how many blocks are rolled back when advised by the peer, but we
    // can work out how many slots. This function wraps the real work, so we can properly
    // record the stats when the rollback is complete. Even if it errors.
    stats::rollback::rollback(
        chain,
        stats::rollback::RollbackType::Peer,
        slot_rollback_size.into(),
    );

    Ok(response)
}

/// Process the next block announced by the peer (a roll forward).
async fn process_next_block(
    peer: &mut PeerClient, chain: Network, header: HeaderContent, tip: &Tip,
    previous_point: &Point, fork_count: &mut Fork,
) -> anyhow::Result<Point> {
    // Decode the Header of the block so we know what to fetch.
    let decoded_header = MultiEraHeader::decode(
        header.variant,
        header.byron_prefix.map(|p| p.0),
        &header.cbor,
    )
    .with_context(|| "Decoding Block Header")?;

    let block_point = Point::new(decoded_header.slot().into(), decoded_header.hash().into());

    debug!("RollForward: {block_point:?} {tip:?}");

    let block = fetch_block_from_peer(
        peer,
        chain,
        block_point.clone(),
        previous_point.clone(),
        *fork_count,
    )
    .await?;

    let block_point = block.point();

    // We can't store this block because we don't know the previous one, so the chain
    // would break; just use it as the previous point.
    if *previous_point == Point::UNKNOWN {
        // Nothing else we can do with the first block when we don't know the previous
        // one.  Just return its point.
        debug!("Not storing the block, because we did not know the previous point.");
    } else {
        live_chain_add_block_to_tip(chain, block, fork_count, tip.0.clone().into())?;
    }

    Ok(block_point)
}

/// Follows the chain until there is an error.
/// If this returns it can be assumed the client is disconnected.
///
/// We take ownership of the client because of that.
async fn follow_chain(
    peer: &mut PeerClient, chain: Network, fork_count: &mut Fork,
) -> anyhow::Result<()> {
    let mut update_sender = get_chain_update_tx_queue(chain).await;
    let mut previous_point = Point::UNKNOWN;

    loop {
        // debug!("Waiting for data from Cardano Peer Node:");

        // We can't get an update sender UNTIL we have released the sync lock.
        if update_sender.is_none() {
            update_sender = get_chain_update_tx_queue(chain).await;
        }

        // Check what response type we need to process.
        let response = match peer.chainsync().state() {
            chainsync::State::CanAwait => peer.chainsync().recv_while_can_await().await,
            chainsync::State::MustReply => peer.chainsync().recv_while_must_reply().await,
            _ => peer.chainsync().request_next().await,
        }
        .with_context(|| "Error while receiving block data from peer")?;

        match response {
            chainsync::NextResponse::RollForward(header, tip) => {
                // Note: Tip is poorly documented.
                // It is a tuple with the following structure:
                // ((Slot#, BlockHash), Block# ).
                // We can find if we are AT tip by comparing the current block Point with the tip
                // Point. We can estimate how far behind we are (in blocks) by
                // subtracting current block height and the tip block height.
                // IF the TIP is <= the current block height THEN we are at tip.
                previous_point =
                    process_next_block(peer, chain, header, &tip, &previous_point, fork_count)
                        .await?;

                // This update is just for followers to know to look again at their live chains for
                // new data.
                notify_follower(chain, update_sender.as_ref(), &chain_update::Kind::Block);
            },
            chainsync::NextResponse::RollBackward(point, tip) => {
                previous_point =
                    process_rollback(peer, chain, point.into(), &tip, &previous_point, fork_count)
                        .await?;
                // This update is just for followers to know to look again at their live chains for
                // new data.
                notify_follower(chain, update_sender.as_ref(), &chain_update::Kind::Rollback);
            },
            chainsync::NextResponse::Await => {
                // debug!("Peer Node says: Await");
            },
        }
    }
}

/// How long we wait before trying to reconnect to a peer when it totally fails our
/// attempts.
const PEER_FAILURE_RECONNECT_DELAY: Duration = Duration::from_secs(10);

/// Do not return until we have a connection to the peer.
async fn persistent_reconnect(addr: &str, chain: Network) -> PeerClient {
    // Not yet connected to the peer.
    stats::peer_connected(chain, false, addr);

    loop {
        // We never have a connection if we end up around the loop, so make a new one.
        match retry_connect(addr, chain.into()).await {
            Ok(peer) => {
                // Successfully connected to the peer.
                stats::peer_connected(chain, true, addr);

                return peer;
            },
            Err(error) => {
                error!(
                    "Chain Sync for: {} from {} : Failed to connect to relay: {}",
                    chain, addr, error,
                );

                // Wait a bit before trying again.
                tokio::time::sleep(PEER_FAILURE_RECONNECT_DELAY).await;
            },
        };
    }
}

/// Backfill the live chain, based on the Mithril Sync updates.
/// This does NOT return until the live chain has been backfilled from the end of mithril
/// to the current synced tip blocks.
///
/// This only needs to be done once per chain connection.
async fn live_sync_backfill(
    cfg: &ChainSyncConfig, update: &MithrilUpdateMessage,
) -> anyhow::Result<()> {
    stats::backfill_started(cfg.chain);

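    // Work out how far we must backfill: from the new Mithril tip up to the point where
    // the live chain currently begins.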
    let (fill_to, _oldest_fork) = get_fill_to_point(cfg.chain).await;
    let range = (update.tip.clone().into(), fill_to.clone().into());
    let mut previous_point = update.previous.clone();

    let range_msg = format!("{range:?}");

    let mut peer = persistent_reconnect(&cfg.relay_address, cfg.chain).await;

    // Request the range of blocks from the Peer.
    peer.blockfetch()
        .request_range(range)
        .await
        .with_context(|| "Requesting Block Range")?;

    let mut backfill_blocks = Vec::<MultiEraBlock>::new();

    while let Some(block_data) = peer.blockfetch().recv_while_streaming().await? {
        // Backfilled blocks get placed in the oldest fork currently on the live-chain.
        let block = MultiEraBlock::new(cfg.chain, block_data, &previous_point, Fork::BACKFILL)
            .with_context(|| {
                format!(
                    "Failed to decode block data. previous: {previous_point:?}, range: {range_msg}"
                )
            })?;

        // Check we get the first block in the range properly.
        if backfill_blocks.is_empty() && !block.point().strict_eq(&update.tip) {
            return Err(Error::BackfillSync(format!(
                "First Block is invalid: Block {:?} != Range Start {:?}.",
                block.point(),
                update.tip
            ))
            .into());
        }

        previous_point = block.point();

        backfill_blocks.push(block);
    }

    // Check we get the last block in the range properly.
    if backfill_blocks.is_empty() || !previous_point.strict_eq(&fill_to) {
        return Err(Error::BackfillSync(format!(
            "Last Block is invalid. Block {previous_point:?} != Range End {fill_to:?}"
        ))
        .into());
    }

    // Report how many backfill blocks we received.
    let backfill_size = backfill_blocks.len() as u64;

    // Try and backfill, if anything doesn't work, or the chain integrity would break, fail.
    live_chain_backfill(cfg.chain, &backfill_blocks)?;

    stats::backfill_ended(cfg.chain, backfill_size);

    debug!("Backfilled Range OK: {}", range_msg);

    Ok(())
}

/// Backfill and Purge the live chain, based on the Mithril Sync updates.
async fn live_sync_backfill_and_purge(
    cfg: ChainSyncConfig, mut rx: mpsc::Receiver<MithrilUpdateMessage>,
    mut sync_ready: SyncReadyWaiter,
) {
    // Wait for first Mithril Update advice, which triggers a BACKFILL of the Live Data.
    let Some(update) = rx.recv().await else {
        error!("Mithril Sync Failed, can not continue chain sync either.");
        return;
    };

    debug!(
        "Before Backfill: Size of the Live Chain is: {} Blocks",
        live_chain_length(cfg.chain)
    );

    let live_chain_head: Point;

    loop {
        // We will re-attempt backfill until it succeeds.
        // Backfill is atomic, it either fully works, or none of the live-chain is changed.
        debug!("Mithril Tip has advanced to: {update:?} : BACKFILL");
        while let Err(error) = live_sync_backfill(&cfg, &update).await {
            error!("Mithril Backfill Sync Failed: {}", error);
            sleep(Duration::from_secs(10)).await;
        }

        if let Some(head_point) = get_live_head_point(cfg.chain) {
            live_chain_head = head_point;
            break;
        }
    }

    stats::new_mithril_update(
        cfg.chain,
        update.tip.slot_or_default(),
        live_chain_length(cfg.chain) as u64,
        live_chain_head.slot_or_default(),
    );

    debug!(
        "After Backfill: Size of the Live Chain is: {} Blocks",
        live_chain_length(cfg.chain)
    );

    // Once Backfill is completed OK, we can use the Blockchain data for Syncing and Querying.
    sync_ready.signal();

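    // The sync lock has just been released above, so the update sender queue should now be
    // obtainable.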
    let mut update_sender = get_chain_update_tx_queue(cfg.chain).await;

    loop {
        let Some(update) = rx.recv().await else {
            error!("Mithril Sync Failed, can not continue chain sync either.");
            return;
        };

        // We can't get an update sender until the sync is released.
        if update_sender.is_none() {
            update_sender = get_chain_update_tx_queue(cfg.chain).await;
        }

        debug!("Mithril Tip has advanced to: {update:?} : PURGE NEEDED");

        let update_point: Point = update.tip.clone();

        if let Err(error) = purge_live_chain(cfg.chain, &update_point) {
            // This should actually never happen.
            error!("Mithril Purge Failed: {}", error);
        }

        debug!(
            "After Purge: Size of the Live Chain is: {} Blocks: Triggering Sleeping Followers.",
            live_chain_length(cfg.chain)
        );

        // Trigger any sleeping followers that data has changed.
        notify_follower(
            cfg.chain,
            update_sender.as_ref(),
            &chain_update::Kind::ImmutableBlockRollForward,
        );
    }

    // TODO: If the mithril sync dies, sleep for a bit and make sure the live chain
    // doesn't grow indefinitely.
    // We COULD move the spawn of mithril following into here, and if the rx dies, kill
    // that task, and restart it.
    // In reality, the mithril sync should never die and drop the queue.
}

/// Handle the background synchronization of the live chain for a given network.
///
/// Note: There can ONLY be at most three of these running at any one time.
/// This is because there can ONLY be one for each of the three known Cardano networks.
///
/// # Arguments
///
/// * `cfg` - The chain sync configuration for the network to follow.
/// * `rx` - The receiver of Mithril snapshot update messages.
///
/// # Returns
///
/// This does not return, it is a background task.
pub(crate) async fn chain_sync(cfg: ChainSyncConfig, rx: mpsc::Receiver<MithrilUpdateMessage>) {
    debug!(
        "Chain Sync for: {} from {} : Starting",
        cfg.chain, cfg.relay_address,
    );

    // Start the SYNC_READY unlock task.
    let sync_waiter = wait_for_sync_ready(cfg.chain);

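    // Clone the config, as it is moved into the spawned backfill task below.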
    let backfill_cfg = cfg.clone();

    // Start the Live chain backfill task.
    let _backfill_join_handle = spawn(async move {
        stats::start_thread(
            cfg.chain,
            stats::thread::name::LIVE_SYNC_BACKFILL_AND_PURGE,
            true,
        );
        live_sync_backfill_and_purge(backfill_cfg.clone(), rx, sync_waiter).await;
        stats::stop_thread(cfg.chain, stats::thread::name::LIVE_SYNC_BACKFILL_AND_PURGE);
    });

    // Live Fill data starts at fork 1.
    // Immutable data from a mithril snapshot is fork 0.
    // Live backfill is always Fork 1.
    let mut fork_count: Fork = Fork::FIRST_LIVE;

    loop {
        // We never have a connection if we end up around the loop, so make a new one.
        let mut peer = persistent_reconnect(&cfg.relay_address, cfg.chain).await;
        match resync_live_tip(&mut peer, cfg.chain).await {
            Ok(tip) => debug!("Tip Resynchronized to {tip}"),
            Err(error) => {
                error!(
                    "Cardano Client {} failed to resync Tip: {}",
                    cfg.relay_address, error
                );
                continue;
            },
        }

        // Note: This can ONLY return with an error, otherwise it will sync indefinitely.
        if let Err(error) = follow_chain(&mut peer, cfg.chain, &mut fork_count).await {
            error!(
                "Cardano Client {} failed to follow chain: {:?}: Reconnecting.",
                cfg.relay_address, error
            );
            continue;
        }

        // If this returns, we are on a new fork (or assume we are)
        fork_count.incr();
    }
}

/// Is the current point aligned with what we know as tip.
pub(crate) async fn point_at_tip(chain: Network, point: &Point) -> bool {
    let tip = get_peer_tip(chain);

    // We are said to be AT TIP, if the block point is greater than or equal to the tip.
    tip <= *point
}