• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

mscaudill / tabbed / 18353692528

08 Oct 2025 06:01PM UTC coverage: 88.766% (-0.2%) from 89.015%
18353692528

push

github

mscaudill
[bugfix]: per issue #21, tabbed now reduces the poll amount to the last row of the data section if the poll amount exceeds the number of sampled rows.

14 of 14 new or added lines in 1 file covered. (100.0%)

45 existing lines in 2 files now uncovered.

561 of 632 relevant lines covered (88.77%)

1.78 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

81.44
/src/tabbed/reading.py
1
"""A reader of text delimited files that supports the following features:
2

3
- Identification of metadata & header file sections.
4
- Automated type conversion to ints, floats, complex numbers,
5
  times, dates and datetime instances.
6
- Selective reading of rows and columns satisfying equality,
7
  membership, regular expression, and rich comparison conditions.
8
- Iterative reading of rows from the input file.
9
"""
10

11
import csv
2✔
12
import itertools
2✔
13
import re
2✔
14
import warnings
2✔
15
from collections import deque
2✔
16
from collections.abc import Callable, Iterator, Sequence
2✔
17
from pathlib import Path
2✔
18
from types import SimpleNamespace
2✔
19
from typing import IO
2✔
20

21
from clevercsv.dialect import SimpleDialect
2✔
22

23
from tabbed import tabbing
2✔
24
from tabbed.sniffing import Header, MetaData, Sniffer
2✔
25
from tabbed.tabbing import Tabulator
2✔
26
from tabbed.utils import parsing
2✔
27
from tabbed.utils.mixins import ReprMixin
2✔
28
from tabbed.utils.parsing import CellType
2✔
29

30

31
class Reader(ReprMixin):
2✔
32
    r"""An iterative reader of irregular text files supporting selective
33
    value-based reading of rows and columns.
34

35
    A common variant to the RFC-4180 CSV standard includes metadata prior to
36
    a possible header and data section. This reader sniffs files for these
37
    sections advancing to the most-likely start position of the data.
38
    Additionally, it uses type inference to automatically convert data cells
39
    into strings, integers, floats, complex, time, date or datetime instances.
40
    Finally, this reader supports selective reading of rows using equality,
41
    membership, comparison, & regular expression value-based conditions supplied
42
    as keyword arguments to the 'tab' method.
43

44
    Attributes:
45
        infile:
46
            An I/O stream instance returned by open.
47
        tabulator:
48
            A callable container of Tab instances; callables that will filter
49
            rows based on equality, membership, rich comparison and regular
50
            expression conditions.
51
        errors:
52
            A container of casting and ragged length errors detected during
53
            reading.
54

55
    Examples:
56
        >>> # Create a temporary file for reading
57
        >>> import os
58
        >>> import tempfile
59
        >>> import random
60
        >>> from datetime import datetime, timedelta
61
        >>> # make metadata that spans several lines
62
        >>> metadata_string = ('Experiment, 3\n'
63
        ... 'Name, Ernst Rutherford\n'
64
        ... 'location, Cavendish Labs\n'
65
        ... 'Time, 11:03:29.092\n'
66
        ... 'Date, 8/23/1917\n'
67
        ... '\n')
68
        >>> # make a header of 5 columns
69
        >>> header = ['group', 'count', 'color', 'time', 'area']
70
        >>> header_string = ','.join(header) + '\n'
71
        >>> # make a reproducible data section with 20 rows
72
        >>> random.seed(0)
73
        >>> groups = random.choices(['a', 'b', 'c'], k=20)
74
        >>> counts = [str(random.randint(0, 10)) for _ in range(20)]
75
        >>> colors = random.choices(['red', 'green', 'blue'], k=20)
76
        >>> start = datetime(1917, 8, 23, 11, 3, 29, 9209)
77
        >>> times = [str(start + timedelta(seconds=10*i)) for i in range(20)]
78
        >>> areas = [str(random.uniform(0, 10)) for _ in range(20)]
79
        >>> x = [','.join(row) for row in zip(
80
        ...    groups, counts, colors, times, areas)]
81
        >>> data_string = '\r\n'.join(x)
82
        >>> # write the metadata, header and data strings
83
        >>> fp = tempfile.NamedTemporaryFile(mode='w', delete=False)
84
        >>> _ = fp.write(metadata_string)
85
        >>> _ = fp.write(header_string)
86
        >>> _ = fp.write(data_string)
87
        >>> fp.close()
88
        >>> # open the file for reading
89
        >>> infile = open(fp.name, mode='r')
90
        >>> reader = Reader(infile)
91
        >>> # ask the reader for the header
92
        >>> reader.header
93
        ... # doctest: +NORMALIZE_WHITESPACE
94
        Header(line=6,
95
        names=['group', 'count', 'color', 'time', 'area'],
96
        string='group,count,color,time,area')
97
        >>> # read group, count & area columns where group is a or c & 0 < area <=4
98
        >>> # by passing keyword args to this reader's 'tab' method
99
        >>> reader.tab(columns=['group', 'count', 'area'],
100
        ... group=['a', 'c'],
101
        ... area='> 0 and <= 4')
102
        >>> # read the data with a chunksize of 3 rows
103
        >>> rows = reader.read(chunksize=3)
104
        >>> type(rows) # rows are of type generator yielding 3 rows at a time
105
        <class 'generator'>
106
        >>> for idx, chunk in enumerate(rows):
107
        ...     print(f'Index = {idx}\n{chunk}')
108
        ...     # doctest: +NORMALIZE_WHITESPACE
109
        Index = 0
110
        [{'group': 'c', 'count': 4, 'area': 3.2005460467254574},
111
        {'group': 'a', 'count': 10, 'area': 1.0905784593110368},
112
        {'group': 'c', 'count': 7, 'area': 2.90329502402758}]
113
        Index = 1
114
        [{'group': 'c', 'count': 8, 'area': 1.8939132855435614},
115
        {'group': 'c', 'count': 4, 'area': 1.867295282555551}]
116
        >>> # close reader since it was not opened with context manager
117
        >>> reader.close()
118
        >>> os.remove(fp.name) # explicitly remove the tempfile
119
    """
120

121
    # no mutation of exclude parameter
    # pylint: disable-next=dangerous-default-value
    def __init__(
        self,
        infile: IO[str],
        poll: int = 20,
        exclude: list[str] = ['', ' ', '-', 'nan', 'NaN', 'NAN'],
        decimal: str = '.',
        **sniffing_kwargs,
    ) -> None:
        """Initialize this Reader.

        Args:
            infile:
                An IO stream instance returned by open builtin.
            poll:
                The number of last sample rows to use for the Sniffer to detect
                header, metadata and data types. For optimal detection of the
                header and metadata file components, the poll should not
                include rows that could be header or metadata.
            exclude:
                A sequence of strings indicating missing values in the file.
                Rows containing these values will be disqualified from use for
                header, metadata and data type detection. However, this
                Reader's read method will still read and return rows containing
                these exclusion values.
            decimal:
                The decimal point character used by numeric cells in infile.
                Defaults to '.'.
            sniffing_kwargs:
                Any valid kwarg for a tabbed Sniffer instance including: start,
                amount, skips and delimiters. Please see Sniffer initializer.

        Notes:
            During initialization, this reader will use the poll and exclude
            arguments to make an initial guess of the header. If this guess is
            wrong, the header may be explicitly set via the 'header' setter
            property.

        Raises:
            An IOError is issued if infile is empty.
        """

        if self._isempty(infile):
            msg = f'File at path {infile.name} is empty.'
            raise IOError(msg)

        self.infile = infile
        self.decimal = decimal
        # order matters: the sniffer must exist before poll validation, which
        # inspects the sniffer's sampled rows, and both must precede the
        # initial header guess
        self._sniffer = Sniffer(infile, decimal=decimal, **sniffing_kwargs)
        self._poll = self._initialize_poll(poll)
        self.exclude = exclude
        self._header = self._sniffer.header(self.poll, self.exclude)
        self.tabulator = Tabulator(self.header, columns=None, tabs=None)
        self.errors = SimpleNamespace(casting=[], ragged=[])
174
    def _isempty(self, infile: IO[str]) -> bool:
        """Returns True if the file backing infile contains zero bytes."""

        return Path(infile.name).stat().st_size == 0
178

179
    def _initialize_poll(self, value: int) -> int:
        """Validates the integer number of last sample rows to poll for
        header, metadata and type detection.

        Args:
            value:
                The requested number of last sample rows to poll.

        Returns:
            The requested value or, if value exceeds the number of sampled
            rows, a reduced poll amount of 1. A warning is issued whenever
            the poll is reduced.
        """

        sample_cnt = len(self._sniffer.rows)
        if value <= sample_cnt:
            return value

        # requested poll is unsatisfiable -- warn & fall back to a single row
        msg = (
            f'\nThe requested poll={value} exceeds the number of sampled'
            f' rows={sample_cnt}. Setting the poll amount to 1.'
        )
        warnings.warn(msg)
        return 1

203
    @property
    def poll(self):
        """The integer number of last sample rows this Reader's sniffer polls
        for header, metadata and type detection.

        Returns:
            An integer count of rows to poll.
        """

        return self._poll

214
    @property
    def sniffer(self) -> Sniffer:
        """Returns this Reader's sniffer instance.

        Accessing the sniffer resniffs the header and rebuilds the tabulator
        whenever the current header was located by the sniffer (i.e. its line
        attribute is set), since sniffer settings may have changed since the
        header was last built.
        """

        if self._header.line is not None:
            # resniff so the header reflects any changed sniffer settings
            self._header = self._sniffer.header(self.poll, self.exclude)
            self.tabulator = Tabulator(self.header, columns=None, tabs=None)

        return self._sniffer

229
    @property
    def header(self) -> Header:
        """Returns the Header instance currently in use by this Reader."""

        return self._header

235
    @header.setter
    def header(self, value: int | list[str] | dict) -> None:
        """Sets this Reader's header and resets the metadata and Tabulator.

        Args:
            value:
                An infile line number, list of string names, or dict of keyword
                arguments for sniffer's header method. If value is type int, the
                header will be set to the split string values of the value row
                of infile. If value is type list, the header will be set to the
                string names in value. If value is type dict, the header will be
                resniffed by sniffer's header method using value keyword args.
                Valid keyword arguments are: 'poll', and 'exclude'. Please type
                help(reader.sniffer.header) for more argument details.

        Returns:
            None

        Raises:
            A ValueError is issued if value is int or list type and the length
            of the proposed header names does not match the length of the last
            sample row in the sniffer.
        """

        # get the expected length of the header from the last sample row.
        expected = len(self._sniffer.rows[-1])

        if isinstance(value, int):
            sniff = Sniffer(self.infile, start=value, amount=1)
            if len(sniff.rows[0]) != expected:
                # note trailing space before the implicit concatenation below;
                # without it the message reads 'does not matchlength'
                msg = (
                    f'Length of row at index = {value} does not match '
                    f'length of last sample row = {expected}'
                )
                raise ValueError(msg)
            result = Header(value, sniff.rows[0], sniff.sample)

        elif isinstance(value, list):
            if len(value) != expected:
                msg = (
                    f'Length of provided header names = {len(value)} does '
                    f'not match length of last sample row = {expected}'
                )
                raise ValueError(msg)
            result = Header(None, value, None)

        elif isinstance(value, dict):
            result = self._sniffer.header(**value)

        else:
            msg = (
                "A header may be set by integer line number, list of "
                "header names or a dict of kwargs for sniffer's header "
                f"method but not type {type(value)}."
            )
            raise ValueError(msg)

        # set header
        self._header = result
        # determine if reader has previously set tabulator and warn
        previous = self.tabulator
        tblr = Tabulator(self.header, tabs=None, columns=None)
        if tblr.columns != previous.columns or tblr.tabs != previous.tabs:
            msg = (
                "Previously set tabs have been reset. Please call 'tab' "
                "method again before reading."
            )
            print(msg)

        self.tabulator = tblr

306
    def metadata(self) -> MetaData:
        """Returns this Reader's current metadata section.

        Returns:
            A MetaData instance sniffed using this Reader's current header,
            poll and exclude settings.
        """

        result = self._sniffer.metadata(self.header, self.poll, self.exclude)
        return result

315
    def tab(
        self,
        columns: list[str] | list[int] | re.Pattern | None = None,
        **tabs: (
            CellType
            | Sequence[CellType]
            | re.Pattern
            | Callable[[dict[str, CellType], str], bool]
        ),
    ) -> None:
        """Builds the Tabulator that will filter infile's rows and columns.

        A Tabulator holds a collection of Tab instances. When called on a row
        it applies each tab in turn and then restricts the surviving row to
        the requested columns. See the tabbed.tabs module for implementation
        details.

        Args:
            columns:
                The columns to keep during reading, given as a list of string
                header names, a list of column indices, or a compiled regular
                expression matched against the header names. A None value
                keeps every header column.
            tabs:
                name = value keyword pairs where each name is a header column
                name and each value is a string, int, float, complex, time,
                date, datetime, regular expression or callable.

                - A string containing rich comparison(s) builds a comparison
                  tab.
                - A plain string, int, float, complex, time, date or datetime
                  builds an equality tab.
                - A sequence builds a membership tab.
                - A compiled re pattern builds a Regex tab (see class docs
                  for an example).

        Notes:
            Numeric tab values (or string representations of numerics) must
            use a '.' decimal, since Tabbed normalizes ',' decimal notation
            to '.' notation.

        Returns:
            None
        """

        tblr = tabbing.Tabulator.from_keywords(self.header, columns, **tabs)
        self.tabulator = tblr

364
    def _log_ragged(self, line, row, raise_error):
        """Logs rows whose cell count differs from the header length.

        Python's csv DictReader signals raggedness in two ways: surplus cells
        beyond the header columns are collected into a list under the None
        key, and missing cells are filled with None values. This method
        detects either signature, records the offending line in this Reader's
        error log (or raises), and strips the None restkey from the row.

        Args:
            line:
                The line number of the row under test.
            row:
                A row dictionary of header names and casted values.
            raise_error:
                A boolean; if True a ragged row raises an error and halts
                reading instead of being logged.

        Returns:
            The row with any None restkey removed.
        """

        surplus = row.pop(None, None)
        if surplus is None and None not in row.values():
            return row

        msg = f'Unexpected line length on row {line}'
        if raise_error:
            raise csv.Error(msg)
        self.errors.ragged.append(msg)

        return row

398
    def _prime(
        self,
        start: int | None = None,
        indices: Sequence | None = None,
    ) -> tuple[Iterator, int]:
        """Prime this Reader for reading by constructing a row iterator.

        Args:
            start:
                An integer line number from the start of the file to begin
                reading data. If None and this reader's header has a line
                number, the line following the header line is the start. If None
                and the header line is None, the line following the metadata
                section is the start. If None and the file has no header or
                metadata, start is 0. If indices are provided, this argument is
                ignored.
            indices:
                An optional Sequence of line numbers to read rows relative to
                the start of the file. If None, all rows from start not in skips
                will be read. If reading a slice of the file, a range instance
                will have improved performance over list or tuple sequence
                types.

        Notes:
            A warning is issued if the start or index start is less than the
            detected start of the data section.

        Returns:
            A row iterator & row index the iterator starts from.

        Raises:
            A ValueError is issued if start and indices are provided and the
            first index is less than start.
        """

        # locate the start of the datasection
        autostart = 0
        if self.header.line is not None:
            autostart = self.header.line + 1
        else:
            # no header line: data begins after the metadata section (or at
            # the metadata's first line when no terminating line was found)
            metalines = self._sniffer.metadata(
                None, self.poll, self.exclude
            ).lines
            autostart = metalines[1] + 1 if metalines[1] else metalines[0]

        astart = start if start is not None else autostart
        stop = None
        step = None

        # indices if provided override start, stop and step
        if indices:

            if isinstance(indices, range):
                # NOTE(review): range indices bypass the astart < start check
                # applied to other Sequence types below -- confirm intended
                astart, stop, step = indices.start, indices.stop, indices.step

            elif isinstance(indices, Sequence):
                # assumes indices are sorted ascending -- TODO confirm
                astart, stop = indices[0], indices[-1] + 1

                if start and astart < start:
                    msg = (
                        f'The first indexed line to read = {astart} is < '
                        f'the start line = {start}!'
                    )
                    raise ValueError(msg)

            else:
                msg = f'indices must be a Sequence type not {type(indices)}.'
                raise TypeError(msg)

        # warn if start is < computed autostart
        if astart < autostart:
            msg = (
                f'start = {astart} is < than detected data start = {autostart}'
            )
            warnings.warn(msg)

        # advance reader's infile to account for blank metalines & get dialect
        self.infile.seek(0)

        # check that we have a valid simple dialect & convert it
        if not self._sniffer.dialect:
            msg = (
                "Sniffer failed to detect dialect. Please set sniffer's"
                "dialect attribute before calling read"
            )
            raise csv.Error(msg)
        assert isinstance(self._sniffer.dialect, SimpleDialect)
        dialect = self._sniffer.dialect.to_csv_dialect()

        # consume astart lines so the DictReader begins at the data section
        # pylint: disable-next=expression-not-assigned
        [next(iter(self.infile)) for _ in range(astart)]
        # iter above is needed for NamedTemporaryFiles which are not iterators
        row_iter = csv.DictReader(
            self.infile,
            self.header.names,
            dialect=dialect,
        )

        # islice's stop is relative to the astart lines already consumed
        # NOTE(review): 'if stop' treats stop == 0 as falsy, leaving stop as
        # None (read to EOF) -- confirm that a zero stop cannot occur here
        stop = stop - astart if stop else None
        return itertools.islice(row_iter, 0, stop, step), astart

499
    # read method needs provide reasonable options for args
    # pylint: disable-next=too-many-positional-arguments
    def read(
        self,
        start: int | None = None,
        skips: Sequence[int] | None = None,
        indices: Sequence | None = None,
        chunksize: int = int(2e5),
        skip_empty: bool = True,
        raise_ragged: bool = False,
    ) -> Iterator[list[dict[str, CellType]]]:
        """Iteratively read dictionary rows that satisfy this Reader's tabs.

        Args:
            start:
                A line number from the start of the file to begin reading data
                from. If None and this reader's header has a line number, the
                line following the header is the start. If None and the header
                line number is None, the line following the last line in the
                metadata is the start. If None and there is no header or
                metadata, the start line is 0.
            skips:
                A sequence of line numbers to skip during reading.
            indices:
                A sequence of line numbers to read rows from. If None, all rows
                from start not in skips will be read. If attempting to read
                a slice of a file a range instance may be provided and will have
                improved performance over other sequence types like lists.
            chunksize:
                The number of data lines to read for each yield. Lower values
                consume less memory. The default is 200,000 rows.
            skip_empty:
                A boolean indicating if rows with no values between the
                delimiters should be skipped. Default is True.
            raise_ragged:
                Boolean indicating if a row with more or fewer columns than
                expected should raise an error and stop reading. The default is
                False. Rows with fewer columns than the header will have None
                as the fill value. Rows with more columns than the header will
                have None keys.

        Yields:
            Chunksize number of dictionary rows with header names as keys.

        Raises:
            A csv.Error is issued if a ragged row is encountered and
            raise_ragged is True. Casting problems do not raise errors but
            gracefully return strings when encountered.

            A ValueError is issued if start and indices are provided and the
            first indexed line to read in indices is less than the line to start
            reading from.
        """

        # a set makes the per-row membership test O(1) instead of O(n)
        skip_set = set(skips) if skips else set()

        # poll types & formats, inconsistencies will trigger casting error log
        types, _ = self._sniffer.types(self.poll, self.exclude)
        formats, _ = self._sniffer.datetime_formats(self.poll, self.exclude)
        castings = dict(zip(self.header.names, zip(types, formats)))

        # initialize casting and ragged row errors
        self.errors.casting = []
        self.errors.ragged = []

        # construct a row iterator
        row_iter, row_start = self._prime(start, indices)

        # ranges already test membership in O(1); other sequences become sets
        index_set = indices
        if indices and not isinstance(indices, range):
            index_set = set(indices)

        fifo: deque[dict[str, CellType]] = deque()
        for line, dic in enumerate(row_iter, row_start):

            if line in skip_set:
                continue

            if index_set and line not in index_set:
                continue

            if not any(dic.values()) and skip_empty:
                continue

            # chk & log raggedness
            dic = self._log_ragged(line, dic, raise_ragged)

            # perform casts, log errors & filter with tabulator
            arow = {}
            for name, astr in dic.items():

                casting, fmt = castings[name]
                try:
                    arow[name] = parsing.convert(
                        astr, self.decimal, casting, fmt
                    )
                except (ValueError, OverflowError, TypeError):
                    # on exception leave astr unconverted & log casting error
                    msg = f"line = {line}, column = '{name}'"
                    self.errors.casting.append(msg)
                    arow[name] = astr

            # apply tabs to filter row
            row = self.tabulator(arow)

            if row:
                fifo.append(row)

            if len(fifo) >= chunksize:
                yield [fifo.popleft() for _ in range(chunksize)]

        yield list(fifo)
        self.infile.seek(0)

609
    def peek(self, count: int = 10) -> None:
        """Prints up to count lines from the first line of the file.

        This method can be used to ensure this Reader identifies the correct
        metadata, header and data start locations.

        Args:
            count:
                The maximum number of lines to print. Files shorter than
                count print only their available lines.

        Returns:
            None
        """

        # ANSI escapes to color the line numbers red
        CRED = '\033[91m'
        CEND = '\033[0m'
        # iter() supports NamedTemporaryFiles, which are not iterators, and
        # islice stops gracefully when the file has fewer than count lines
        for num, aline in enumerate(itertools.islice(iter(self.infile), count)):
            print(CRED + f'{num}' + CEND, aline.rstrip())

        self.infile.seek(0)

632
    def close(self):
        """Closes the infile resource held by this Reader."""

        self.infile.close()
636

637

638
if __name__ == '__main__':

    # run this module's doctest examples when executed as a script
    import doctest

    doctest.testmod()
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc