google / alioth / 17385686062

01 Sep 2025 07:20PM UTC. Coverage: 18.016% (-0.1%), down from 18.149%.

Pull Request #281: Port to Apple Hypervisor framework
Merge f6f978f6a into 6ec9a6d6b (via github / web-flow)

0 of 152 new or added lines in 11 files covered (0.0%).
1323 existing lines in 30 files are now uncovered.
1362 of 7560 relevant lines covered (18.02%).
18.79 hits per line.

Source file: /alioth/src/mem/mapped.rs (74.35% covered)
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::cell::UnsafeCell;
#[cfg(target_os = "linux")]
use std::ffi::CStr;
use std::fmt::Debug;
use std::fs::File;
use std::io::{IoSlice, IoSliceMut, Read, Write};
use std::mem::{align_of, size_of};
#[cfg(target_os = "linux")]
use std::os::fd::FromRawFd;
use std::os::fd::{AsFd, AsRawFd, BorrowedFd};
use std::ptr::{NonNull, null_mut};
use std::sync::Arc;

#[cfg(target_os = "linux")]
use libc::{MADV_HUGEPAGE, MFD_CLOEXEC};
use libc::{
    MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, MAP_SHARED, MS_ASYNC, PROT_READ, PROT_WRITE, c_void,
    madvise, mmap, msync, munmap,
};
use parking_lot::{RwLock, RwLockReadGuard};
use snafu::ResultExt;
use zerocopy::{FromBytes, Immutable, IntoBytes};

use crate::ffi;
use crate::mem::addressable::{Addressable, SlotBackend};
use crate::mem::{Error, Result, error};

#[derive(Debug)]
struct MemPages {
    addr: NonNull<c_void>,
    len: usize,
    fd: Option<(File, u64)>,
}

unsafe impl Send for MemPages {}
unsafe impl Sync for MemPages {}

impl Drop for MemPages {
    fn drop(&mut self) {
        let ret = unsafe { munmap(self.addr.as_ptr(), self.len) };
        if ret != 0 {
            log::error!("munmap({:p}, {:x}) = {:x}", self.addr, self.len, ret);
        } else {
            log::info!("munmap({:p}, {:x}) = {:x}, done", self.addr, self.len, ret);
        }
    }
}
// ArcMemPages uses Arc to manage the underlying memory and caches
// the address and size on the stack. Compared with using Arc<MemPages>,
// it avoids a memory load when a caller tries to read/write the pages.
// TODO: is it really necessary?
#[derive(Debug, Clone)]
pub struct ArcMemPages {
    addr: usize,
    size: usize,
    _inner: Arc<MemPages>,
}

impl SlotBackend for ArcMemPages {
    fn size(&self) -> u64 {
        self.size as u64
    }
}

impl ArcMemPages {
    pub fn addr(&self) -> usize {
        self.addr
    }

    pub fn size(&self) -> u64 {
        self.size as u64
    }

    pub fn fd(&self) -> Option<(BorrowedFd<'_>, u64)> {
        self._inner
            .fd
            .as_ref()
            .map(|(f, offset)| (f.as_fd(), *offset))
    }

    pub fn sync(&self) -> Result<()> {
        ffi!(unsafe { msync(self.addr as *mut _, self.size, MS_ASYNC) })?;
        Ok(())
    }

    #[cfg(target_os = "linux")]
    pub fn madvise_hugepage(&self) -> Result<()> {
        ffi!(unsafe { madvise(self.addr as *mut _, self.size, MADV_HUGEPAGE) })?;
        Ok(())
    }

    fn from_raw(addr: *mut c_void, len: usize, fd: Option<(File, u64)>) -> Self {
        let addr = NonNull::new(addr).expect("address from mmap() should not be null");
        ArcMemPages {
            addr: addr.as_ptr() as usize,
            size: len,
            _inner: Arc::new(MemPages { addr, len, fd }),
        }
    }

    pub fn from_file(file: File, offset: i64, len: usize, prot: i32) -> Result<Self> {
        let addr = ffi!(
            unsafe { mmap(null_mut(), len, prot, MAP_SHARED, file.as_raw_fd(), offset) },
            MAP_FAILED
        )?;
        Ok(Self::from_raw(addr, len, Some((file, offset as u64))))
    }

    #[cfg(target_os = "linux")]
    pub fn from_memfd(name: &CStr, size: usize, prot: Option<i32>) -> Result<Self> {
        let fd = ffi!(unsafe { libc::memfd_create(name.as_ptr(), MFD_CLOEXEC) })?;
        let prot = prot.unwrap_or(PROT_WRITE | PROT_READ);
        let addr = ffi!(
            unsafe { mmap(null_mut(), size, prot, MAP_SHARED, fd, 0) },
            MAP_FAILED
        )?;
        let file = unsafe { File::from_raw_fd(fd) };
        file.set_len(size as _)?;
        Ok(Self::from_raw(addr, size, Some((file, 0))))
    }

    pub fn from_anonymous(size: usize, prot: Option<i32>, flags: Option<i32>) -> Result<Self> {
        let prot = prot.unwrap_or(PROT_WRITE | PROT_READ);
        let flags = flags.unwrap_or(MAP_PRIVATE) | MAP_ANONYMOUS;
        let addr = ffi!(
            unsafe { mmap(null_mut(), size, prot, flags, -1, 0) },
            MAP_FAILED
        )?;
        Ok(Self::from_raw(addr, size, None))
    }

    /// Given offset and len, return the host virtual address and len;
    /// len might be truncated.
    fn get_valid_range(&self, offset: usize, len: usize) -> Result<(usize, usize)> {
        let end = offset.wrapping_add(len).wrapping_sub(1);
        if offset >= self.size || end < offset {
            return error::ExceedsLimit {
                addr: offset as u64,
                size: len as u64,
            }
            .fail();
        }
        let valid_len = std::cmp::min(self.size - offset, len);
        Ok((self.addr + offset, valid_len))
    }

    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        unsafe { std::slice::from_raw_parts_mut(self.addr as *mut u8, self.size) }
    }

    pub fn as_slice(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.addr as *const u8, self.size) }
    }

    /// Given offset and len, return a slice, len might be truncated.
    fn get_partial_slice(&self, offset: usize, len: usize) -> Result<&[u8], Error> {
        let (addr, len) = self.get_valid_range(offset, len)?;
        Ok(unsafe { std::slice::from_raw_parts(addr as *const u8, len) })
    }

    /// Given offset and len, return a mutable slice, len might be truncated.
    #[allow(clippy::mut_from_ref)]
    fn get_partial_slice_mut(&self, offset: usize, len: usize) -> Result<&mut [u8], Error> {
        let (addr, len) = self.get_valid_range(offset, len)?;
        Ok(unsafe { std::slice::from_raw_parts_mut(addr as *mut u8, len) })
    }
}

#[derive(Debug)]
pub struct Ram {
    inner: Addressable<ArcMemPages>,
}

#[derive(Debug)]
pub struct RamBus {
    ram: RwLock<Ram>,
}

struct Iter<'m> {
    ram: &'m Ram,
    gpa: u64,
    remain: u64,
}

impl<'m> Iterator for Iter<'m> {
    type Item = Result<&'m [u8]>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.remain == 0 {
            return None;
        }
        let r = self.ram.get_partial_slice(self.gpa, self.remain);
        if let Ok(s) = r {
            self.gpa += s.len() as u64;
            self.remain -= s.len() as u64;
        }
        Some(r)
    }
}

struct IterMut<'m> {
    ram: &'m Ram,
    gpa: u64,
    remain: u64,
}

impl<'m> Iterator for IterMut<'m> {
    type Item = Result<&'m mut [u8]>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.remain == 0 {
            return None;
        }
        let r = self.ram.get_partial_slice_mut(self.gpa, self.remain);
        if let Ok(ref s) = r {
            self.gpa += s.len() as u64;
            self.remain -= s.len() as u64;
        }
        Some(r)
    }
}

impl Ram {
    fn slice_iter(&self, gpa: u64, len: u64) -> Iter<'_> {
        Iter {
            ram: self,
            gpa,
            remain: len,
        }
    }

    fn slice_iter_mut(&self, gpa: u64, len: u64) -> IterMut<'_> {
        IterMut {
            ram: self,
            gpa,
            remain: len,
        }
    }

    fn get_partial_slice(&self, gpa: u64, len: u64) -> Result<&[u8]> {
        let Some((start, user_mem)) = self.inner.search(gpa) else {
            return error::NotMapped { addr: gpa }.fail();
        };
        user_mem.get_partial_slice((gpa - start) as usize, len as usize)
    }

    fn get_partial_slice_mut(&self, gpa: u64, len: u64) -> Result<&mut [u8]> {
        let Some((start, user_mem)) = self.inner.search(gpa) else {
            return error::NotMapped { addr: gpa }.fail();
        };
        user_mem.get_partial_slice_mut((gpa - start) as usize, len as usize)
    }

    pub fn get_slice<T>(&self, gpa: u64, len: u64) -> Result<&[UnsafeCell<T>], Error> {
        let total_len = len * size_of::<T>() as u64;
        let host_ref = self.get_partial_slice(gpa, total_len)?;
        let ptr = host_ref.as_ptr() as *const UnsafeCell<T>;
        if host_ref.len() as u64 != total_len {
            error::NotContinuous {
                addr: gpa,
                size: total_len,
            }
            .fail()
        } else if !ptr.is_aligned() {
            error::NotAligned {
                addr: ptr as u64,
                align: align_of::<T>(),
            }
            .fail()
        } else {
            Ok(unsafe { &*core::ptr::slice_from_raw_parts(ptr, len as usize) })
        }
    }

    pub fn get_ptr<T>(&self, gpa: u64) -> Result<*mut T, Error> {
        let host_ref = self.get_partial_slice_mut(gpa, size_of::<T>() as u64)?;
        let ptr = host_ref.as_mut_ptr();
        if host_ref.len() != size_of::<T>() {
            error::NotContinuous {
                addr: gpa,
                size: size_of::<T>() as u64,
            }
            .fail()
        } else if !ptr.is_aligned() {
            error::NotAligned {
                addr: ptr as u64,
                align: align_of::<T>(),
            }
            .fail()
        } else {
            Ok(ptr as *mut T)
        }
    }

    pub fn read(&self, gpa: u64, buf: &mut [u8]) -> Result<()> {
        let host_ref = self.get_partial_slice(gpa, buf.len() as u64)?;
        if host_ref.len() == buf.len() {
            buf.copy_from_slice(host_ref);
        } else {
            let mut cur = 0;
            for r in self.slice_iter(gpa, buf.len() as u64) {
                let s = r?;
                let s_len = s.len();
                buf[cur..(cur + s_len)].copy_from_slice(s);
                cur += s_len;
            }
        }
        Ok(())
    }

    pub fn read_t<T>(&self, gpa: u64) -> Result<T>
    where
        T: FromBytes + IntoBytes,
    {
        let mut v = T::new_zeroed();
        self.read(gpa, v.as_mut_bytes())?;
        Ok(v)
    }

    pub fn write(&self, gpa: u64, buf: &[u8]) -> Result<()> {
        let len = buf.len() as u64;
        let host_ref = self.get_partial_slice_mut(gpa, len)?;
        if host_ref.len() == buf.len() {
            host_ref.copy_from_slice(buf);
            Ok(())
        } else {
            let mut cur = 0;
            for r in self.slice_iter_mut(gpa, len) {
                let s = r?;
                let s_len = s.len();
                s.copy_from_slice(&buf[cur..(cur + s_len)]);
                cur += s_len;
            }
            Ok(())
        }
    }

    pub fn write_t<T>(&self, gpa: u64, val: &T) -> Result<(), Error>
    where
        T: IntoBytes + Immutable,
    {
        self.write(gpa, val.as_bytes())
    }

    pub fn translate(&self, gpa: u64) -> Result<*const u8> {
        let s = self.get_partial_slice(gpa, 1)?;
        Ok(s.as_ptr())
    }

    pub fn translate_iov<'a>(&'a self, iov: &[(u64, u64)]) -> Result<Vec<IoSlice<'a>>> {
        let mut slices = vec![];
        for (gpa, len) in iov {
            for r in self.slice_iter(*gpa, *len) {
                slices.push(IoSlice::new(r?));
            }
        }
        Ok(slices)
    }

    pub fn translate_iov_mut<'a>(&'a self, iov: &[(u64, u64)]) -> Result<Vec<IoSliceMut<'a>>> {
        let mut slices = vec![];
        for (gpa, len) in iov {
            for r in self.slice_iter_mut(*gpa, *len) {
                slices.push(IoSliceMut::new(r?));
            }
        }
        Ok(slices)
    }

    pub fn iter(&self) -> impl DoubleEndedIterator<Item = (u64, &ArcMemPages)> {
        self.inner.iter()
    }

    pub fn madvise(&self, gpa: u64, size: u64, advice: i32) -> Result<()> {
        for r in self.slice_iter_mut(gpa, size) {
            let s = r?;
            ffi!(unsafe { madvise(s.as_mut_ptr() as _, s.len(), advice) })?;
        }
        Ok(())
    }
}

impl Default for RamBus {
    fn default() -> Self {
        Self::new()
    }
}

impl RamBus {
    pub fn lock_layout(&self) -> RwLockReadGuard<'_, Ram> {
        self.ram.read()
    }

    pub fn new() -> Self {
        Self {
            ram: RwLock::new(Ram {
                inner: Addressable::default(),
            }),
        }
    }

    pub(crate) fn add(&self, gpa: u64, user_mem: ArcMemPages) -> Result<(), Error> {
        let mut ram = self.ram.write();
        ram.inner.add(gpa, user_mem)?;
        Ok(())
    }

    pub(crate) fn remove(&self, gpa: u64) -> Result<ArcMemPages, Error> {
        let mut ram = self.ram.write();
        ram.inner.remove(gpa)
    }

    pub fn read(&self, gpa: u64, buf: &mut [u8]) -> Result<()> {
        let ram = self.ram.read();
        ram.read(gpa, buf)
    }

    pub fn write(&self, gpa: u64, buf: &[u8]) -> Result<()> {
        let ram = self.ram.read();
        ram.write(gpa, buf)
    }

    pub fn read_t<T>(&self, gpa: u64) -> Result<T, Error>
    where
        T: FromBytes + IntoBytes,
    {
        let ram = self.ram.read();
        ram.read_t(gpa)
    }

    pub fn write_t<T>(&self, gpa: u64, val: &T) -> Result<(), Error>
    where
        T: IntoBytes + Immutable,
    {
        let ram = self.ram.read();
        ram.write_t(gpa, val)
    }

    pub fn read_range(&self, gpa: u64, len: u64, dst: &mut impl Write) -> Result<()> {
        let ram = self.ram.read();
        for r in ram.slice_iter(gpa, len) {
            dst.write_all(r?).context(error::Write)?;
        }
        Ok(())
    }

    pub fn write_range(&self, gpa: u64, len: u64, mut src: impl Read) -> Result<()> {
        let ram = self.ram.read();
        for r in ram.slice_iter_mut(gpa, len) {
            src.read_exact(r?).context(error::Read)?;
        }
        Ok(())
    }

    pub fn read_vectored<T, F>(&self, bufs: &[(u64, u64)], callback: F) -> Result<T, Error>
    where
        F: FnOnce(&[IoSlice<'_>]) -> T,
    {
        let ram = self.ram.read();
        let mut iov = vec![];
        for (gpa, len) in bufs {
            for r in ram.slice_iter(*gpa, *len) {
                iov.push(IoSlice::new(r?));
            }
        }
        Ok(callback(&iov))
    }

    pub fn write_vectored<T, F>(&self, bufs: &[(u64, u64)], callback: F) -> Result<T, Error>
    where
        F: FnOnce(&mut [IoSliceMut<'_>]) -> T,
    {
        let ram = self.ram.read();
        let mut iov = vec![];
        for (gpa, len) in bufs {
            for r in ram.slice_iter_mut(*gpa, *len) {
                iov.push(IoSliceMut::new(r?));
            }
        }
        Ok(callback(&mut iov))
    }
}

#[cfg(test)]
mod test {
    use std::io::{Read, Write};
    use std::mem::size_of;

    use assert_matches::assert_matches;
    use libc::{PROT_READ, PROT_WRITE};
    use zerocopy::{FromBytes, Immutable, IntoBytes};

    use super::{ArcMemPages, RamBus};

    #[derive(Debug, IntoBytes, FromBytes, Immutable, PartialEq, Eq)]
    #[repr(C)]
    struct MyStruct {
        data: [u32; 8],
    }

    const PAGE_SIZE: u64 = 1 << 12;

    #[test]
    fn test_ram_bus_read() {
        let bus = RamBus::new();
        let prot = PROT_READ | PROT_WRITE;
        let mem1 = ArcMemPages::from_anonymous(PAGE_SIZE as usize, Some(prot), None).unwrap();
        let mem2 = ArcMemPages::from_anonymous(PAGE_SIZE as usize, Some(prot), None).unwrap();

        if mem1.addr > mem2.addr {
            bus.add(0x0, mem1).unwrap();
            bus.add(PAGE_SIZE, mem2).unwrap();
        } else {
            bus.add(0x0, mem2).unwrap();
            bus.add(PAGE_SIZE, mem1).unwrap();
        }

        let data = MyStruct {
            data: [1, 2, 3, 4, 5, 6, 7, 8],
        };
        let data_size = size_of::<MyStruct>() as u64;
        for gpa in (PAGE_SIZE - data_size)..=PAGE_SIZE {
            bus.write_t(gpa, &data).unwrap();
            let r: MyStruct = bus.read_t(gpa).unwrap();
            assert_eq!(r, data)
        }
        let memory_end = PAGE_SIZE * 2;
        for gpa in (memory_end - data_size - 10)..=(memory_end - data_size) {
            bus.write_t(gpa, &data).unwrap();
            let r: MyStruct = bus.read_t(gpa).unwrap();
            assert_eq!(r, data)
        }
        for gpa in (memory_end - data_size + 1)..memory_end {
            assert_matches!(bus.write_t(gpa, &data), Err(_));
            assert_matches!(bus.read_t::<MyStruct>(gpa), Err(_));
        }

        let data: Vec<u8> = (0..64).collect();
        for gpa in (PAGE_SIZE - 64)..=PAGE_SIZE {
            bus.write_range(gpa, 64, &*data).unwrap();
            let mut buf = Vec::new();
            bus.read_range(gpa, 64, &mut buf).unwrap();
            assert_eq!(data, buf)
        }

        let guest_iov = [(0, 16), (PAGE_SIZE - 16, 32), (2 * PAGE_SIZE - 16, 16)];
        let write_ret = bus.write_vectored(&guest_iov, |iov| {
            assert_eq!(iov.len(), 4);
            (&*data).read_vectored(iov)
        });
        assert_matches!(write_ret, Ok(Ok(64)));
        let mut buf_read = Vec::new();
        let read_ret = bus.read_vectored(&guest_iov, |iov| {
            assert_eq!(iov.len(), 4);
            buf_read.write_vectored(iov)
        });
        assert_matches!(read_ret, Ok(Ok(64)));

        let locked_bus = bus.lock_layout();
        let bufs = locked_bus.translate_iov(&guest_iov).unwrap();
        println!("{bufs:?}");
        drop(locked_bus);
        bus.remove(0x0).unwrap();
    }
}
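
For reference, the flow exercised by the test module above can be condensed into a short sketch: create a RamBus, back it with an ArcMemPages mapping, and move typed values in and out with write_t/read_t. This is illustrative only, not code from the report; the module path crate::mem::mapped, the function name ram_bus_round_trip, and the guest addresses are assumptions, and the sketch has to live inside the alioth crate because RamBus::add and RamBus::remove are pub(crate).

// Minimal sketch (not part of mapped.rs): back one page of guest RAM with an
// anonymous mapping and round-trip a value through the RamBus.
// Assumed: crate-internal code and the module path crate::mem::mapped.
use crate::mem::Result;
use crate::mem::mapped::{ArcMemPages, RamBus};

fn ram_bus_round_trip() -> Result<()> {
    const PAGE_SIZE: usize = 1 << 12;
    let bus = RamBus::new();
    // prot/flags default to PROT_READ | PROT_WRITE and MAP_PRIVATE | MAP_ANONYMOUS.
    let pages = ArcMemPages::from_anonymous(PAGE_SIZE, None, None)?;
    // Map the pages at guest physical address 0.
    bus.add(0x0, pages)?;
    // write_t/read_t take the layout read lock, translate the GPA to a host
    // slice, and copy the bytes of a zerocopy-compatible type.
    bus.write_t(0x100, &0x1234_5678u32)?;
    let v: u32 = bus.read_t(0x100)?;
    assert_eq!(v, 0x1234_5678);
    // remove() hands back the ArcMemPages; dropping the last clone runs
    // munmap via MemPages::drop.
    bus.remove(0x0)?;
    Ok(())
}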