
pyiron / pympipool · build 9300952449 (push, via github, committed by web-flow)

30 May 2024 10:21AM UTC · coverage: 91.962% (-0.08%) from 92.043%

Merge pull request #347 from pyiron/filepath
Call cache script based on full path

11 of 11 new or added lines in 2 files covered (100.0%). 1 existing line in 1 file is now uncovered. 778 of 846 relevant lines covered (91.96%), at 0.92 hits per line.

Source file: /pympipool/shared/executorbase.py (90.83% covered; uncovered lines are marked "# not covered" below)

from concurrent.futures import (
    Executor as FutureExecutor,
    Future,
)
import importlib.util
import inspect
import os
import queue
import sys
from time import sleep
from typing import Optional, List

import cloudpickle

from pympipool.shared.communication import interface_bootup
from pympipool.shared.thread import RaisingThread
from pympipool.shared.interface import BaseInterface
from pympipool.shared.inputcheck import (
    check_resource_dict,
    check_resource_dict_is_empty,
)


class ExecutorBase(FutureExecutor):
    def __init__(self):
        cloudpickle_register(ind=3)
        self._future_queue = queue.Queue()
        self._process = None

    @property
    def info(self):
        if self._process is not None and isinstance(self._process, list):
            meta_data_dict = self._process[0]._kwargs.copy()
            if "future_queue" in meta_data_dict.keys():
                del meta_data_dict["future_queue"]
            meta_data_dict["max_workers"] = len(self._process)
            return meta_data_dict
        elif self._process is not None:
            meta_data_dict = self._process._kwargs.copy()
            if "future_queue" in meta_data_dict.keys():
                del meta_data_dict["future_queue"]
            return meta_data_dict
        else:
            return None

    @property
    def future_queue(self):
        return self._future_queue  # not covered

    def submit(self, fn: callable, *args, resource_dict: dict = {}, **kwargs):
        """
        Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and returns
        a Future instance representing the execution of the callable.

        Args:
            fn (callable): function to submit for execution
            args: arguments for the submitted function
            kwargs: keyword arguments for the submitted function
            resource_dict (dict): resource dictionary, which defines the resources used for the execution of the
                                  function. Example resource dictionary: {
                                      cores: 1,
                                      threads_per_core: 1,
                                      gpus_per_worker: 0,
                                      oversubscribe: False,
                                      cwd: None,
                                      executor: None,
                                      hostname_localhost: False,
                                  }

        Returns:
            A Future representing the given call.
        """
        check_resource_dict_is_empty(resource_dict=resource_dict)
        check_resource_dict(function=fn)
        f = Future()
        self._future_queue.put({"fn": fn, "args": args, "kwargs": kwargs, "future": f})
        return f

    def shutdown(self, wait: bool = True, *, cancel_futures: bool = False):
        """
        Clean-up the resources associated with the Executor.

        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                parallel_executors have been reclaimed.
            cancel_futures: If True then shutdown will cancel all pending
                futures. Futures that are completed or running will not be
                cancelled.
        """
        if cancel_futures:
            cancel_items_in_queue(que=self._future_queue)  # not covered
        self._future_queue.put({"shutdown": True, "wait": wait})
        if wait and self._process is not None:
            self._process.join()
            self._future_queue.join()
        self._process = None
        self._future_queue = None

    def _set_process(self, process: RaisingThread):
        self._process = process
        self._process.start()

    def __len__(self):
        return self._future_queue.qsize()

    def __del__(self):
        try:
            self.shutdown(wait=False)
        except (AttributeError, RuntimeError):
            pass
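
# Illustrative sketch (not part of the module): ExecutorBase implements the
# concurrent.futures.Executor API, so a subclass that wires up a worker via
# _set_process() can be driven like any standard executor; "MyExecutor" is a
# hypothetical subclass:
#
#     exe = MyExecutor()
#     fut = exe.submit(sum, [1, 2, 3])   # returns a concurrent.futures.Future
#     assert fut.result() == 6
#     exe.shutdown(wait=True)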


class ExecutorBroker(ExecutorBase):
    def shutdown(self, wait: bool = True, *, cancel_futures: bool = False):
        """Clean-up the resources associated with the Executor.

        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                parallel_executors have been reclaimed.
            cancel_futures: If True then shutdown will cancel all pending
                futures. Futures that are completed or running will not be
                cancelled.
        """
        if cancel_futures:
            cancel_items_in_queue(que=self._future_queue)
        if self._process is not None:
            for _ in range(len(self._process)):
                self._future_queue.put({"shutdown": True, "wait": wait})
            if wait:
                for process in self._process:
                    process.join()
                self._future_queue.join()
        self._process = None
        self._future_queue = None

    def _set_process(self, process: List[RaisingThread]):
        self._process = process
        for process in self._process:
            process.start()


class ExecutorSteps(ExecutorBase):
    def submit(self, fn: callable, *args, resource_dict: dict = {}, **kwargs):
        """
        Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and returns
        a Future instance representing the execution of the callable.

        Args:
            fn (callable): function to submit for execution
            args: arguments for the submitted function
            kwargs: keyword arguments for the submitted function
            resource_dict (dict): resource dictionary, which defines the resources used for the execution of the
                                  function. Example resource dictionary: {
                                      cores: 1,
                                      threads_per_core: 1,
                                      gpus_per_worker: 0,
                                      oversubscribe: False,
                                      cwd: None,
                                      executor: None,
                                      hostname_localhost: False,
                                  }

        Returns:
            A Future representing the given call.
        """
        check_resource_dict(function=fn)
        f = Future()
        self._future_queue.put(
            {
                "fn": fn,
                "args": args,
                "kwargs": kwargs,
                "future": f,
                "resource_dict": resource_dict,
            }
        )
        return f

    def shutdown(self, wait: bool = True, *, cancel_futures: bool = False):
        """Clean-up the resources associated with the Executor.

        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                parallel_executors have been reclaimed.
            cancel_futures: If True then shutdown will cancel all pending
                futures. Futures that are completed or running will not be
                cancelled.
        """
        if cancel_futures:
            cancel_items_in_queue(que=self._future_queue)  # not covered
        if self._process is not None:
            self._future_queue.put({"shutdown": True, "wait": wait})
            if wait:
                self._process.join()
                self._future_queue.join()
        self._process = None
        self._future_queue = None
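
# Illustrative sketch (not part of the module): unlike ExecutorBase.submit(),
# ExecutorSteps.submit() forwards a per-call resource_dict into the queue, so each
# task can request its own resources; "MyStepExecutor" is a hypothetical subclass:
#
#     exe = MyStepExecutor()
#     fut = exe.submit(my_mpi_function, resource_dict={"cores": 2})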


def cancel_items_in_queue(que: queue.Queue):
    """
    Cancel items which are still waiting in the queue. If the executor is busy, tasks remain in the queue, so the
    future objects have to be cancelled when the executor shuts down.

    Args:
        que (queue.Queue): Queue with task objects which should be executed
    """
    while True:
        try:
            item = que.get_nowait()
            if isinstance(item, dict) and "future" in item.keys():
                item["future"].cancel()
                que.task_done()
        except queue.Empty:
            break
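
# Illustrative sketch (not part of the module): pending task dictionaries carry a
# Future under the "future" key, which is what gets cancelled here:
#
#     q = queue.Queue()
#     f = Future()
#     q.put({"fn": print, "args": (), "kwargs": {}, "future": f})
#     cancel_items_in_queue(que=q)
#     assert f.cancelled()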


def cloudpickle_register(ind: int = 2):
    """
    Cloudpickle can either pickle by value or pickle by reference. The functions which are communicated have to
    be pickled by value rather than by reference, so the module which calls the map function is pickled by value:
    https://github.com/cloudpipe/cloudpickle#overriding-pickles-serialization-mechanism-for-importable-constructs
    The inspect module can help to find the module which is calling pympipool:
    https://docs.python.org/3/library/inspect.html
    Another good read to learn more about inspect is:
    http://pymotw.com/2/inspect/index.html#module-inspect
    An index of 1 refers to one level above the map function.

    Args:
        ind (int): index of the level at which pickling by value starts; the remaining levels are pickled by reference
    """
    try:  # When executed in a Jupyter notebook this can cause a ValueError - in this case we just ignore it.
        cloudpickle.register_pickle_by_value(inspect.getmodule(inspect.stack()[ind][0]))
    except IndexError:  # not covered
        cloudpickle_register(ind=ind - 1)  # not covered
    except ValueError:  # not covered
        pass  # not covered
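
# Illustrative note (not part of the module): inspect.stack()[ind][0] is the frame
# object ind levels up the call stack, and inspect.getmodule() resolves it to the
# module that should be pickled by value. ExecutorBase.__init__ passes ind=3 on the
# assumption that the user's module sits three frames above this call, e.g.
# user code -> Executor subclass __init__ -> ExecutorBase.__init__ -> cloudpickle_register.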


def execute_parallel_tasks(
    future_queue: queue.Queue,
    cores: int,
    interface_class: BaseInterface,
    hostname_localhost: bool = False,
    init_function: Optional[callable] = None,
    **kwargs,
):
    """
    Execute tasks from the queue one at a time, each in parallel, using the message passing interface (MPI).

    Args:
       future_queue (queue.Queue): task queue of dictionary objects which are submitted to the parallel process
       cores (int): defines the total number of MPI ranks to use
       interface_class (BaseInterface): Interface to start process on selected compute resources
       hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                     context of an HPC cluster this is essential to be able to communicate with an
                                     Executor running on a different compute node within the same allocation. In
                                     principle any computer should be able to resolve that its own hostname points
                                     to the same address as localhost. Still, macOS >= 12 seems to disable this
                                     lookup for security reasons, so on macOS this option has to be set to true.
       init_function (callable): optional function to preset arguments for functions which are submitted later
    """
    interface = interface_bootup(
        command_lst=_get_backend_path(cores=cores),
        connections=interface_class(cores=cores, **kwargs),
        hostname_localhost=hostname_localhost,
    )
    if init_function is not None:
        interface.send_dict(
            input_dict={"init": True, "fn": init_function, "args": (), "kwargs": {}}
        )
    while True:
        task_dict = future_queue.get()
        if "shutdown" in task_dict.keys() and task_dict["shutdown"]:
            interface.shutdown(wait=task_dict["wait"])
            future_queue.task_done()
            future_queue.join()
            break
        elif "fn" in task_dict.keys() and "future" in task_dict.keys():
            f = task_dict.pop("future")
            if f.set_running_or_notify_cancel():
                try:
                    f.set_result(interface.send_and_receive_dict(input_dict=task_dict))
                except Exception as thread_exception:
                    interface.shutdown(wait=True)
                    future_queue.task_done()
                    f.set_exception(exception=thread_exception)
                    raise thread_exception
                else:
                    future_queue.task_done()
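
# Illustrative note (not part of the module): the worker loop above consumes two
# message shapes from the queue, a task dictionary and a shutdown sentinel:
#
#     future_queue.put({"fn": my_function, "args": (1,), "kwargs": {}, "future": Future()})
#     future_queue.put({"shutdown": True, "wait": True})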


def execute_separate_tasks(
    future_queue: queue.Queue,
    interface_class: BaseInterface,
    max_cores: int,
    hostname_localhost: bool = False,
    **kwargs,
):
    """
    Execute each task in its own separate process using the message passing interface (MPI).

    Args:
       future_queue (queue.Queue): task queue of dictionary objects which are submitted to the parallel process
       interface_class (BaseInterface): Interface to start process on selected compute resources
       max_cores (int): defines the number of cores which can be used in parallel
       hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                     context of an HPC cluster this is essential to be able to communicate with an
                                     Executor running on a different compute node within the same allocation. In
                                     principle any computer should be able to resolve that its own hostname points
                                     to the same address as localhost. Still, macOS >= 12 seems to disable this
                                     lookup for security reasons, so on macOS this option has to be set to true.
    """
    active_task_dict = {}
    process_lst, qtask_lst = [], []
    while True:
        task_dict = future_queue.get()
        if "shutdown" in task_dict.keys() and task_dict["shutdown"]:
            if task_dict["wait"]:
                _ = [process.join() for process in process_lst]
            future_queue.task_done()
            future_queue.join()
            break
        elif "fn" in task_dict.keys() and "future" in task_dict.keys():
            qtask = queue.Queue()
            process, active_task_dict = _submit_function_to_separate_process(
                task_dict=task_dict,
                qtask=qtask,
                active_task_dict=active_task_dict,
                interface_class=interface_class,
                executor_kwargs=kwargs,
                max_cores=max_cores,
                hostname_localhost=hostname_localhost,
            )
            qtask_lst.append(qtask)
            process_lst.append(process)
            future_queue.task_done()
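
# Illustrative note (not part of the module): each submitted task gets its own
# single-use queue and worker thread via _submit_function_to_separate_process();
# the per-task queue receives the task followed immediately by a shutdown
# sentinel, so the worker thread exits after executing exactly one function call.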


def execute_tasks_with_dependencies(
    future_queue: queue.Queue,
    executor_queue: queue.Queue,
    executor: ExecutorBase,
    refresh_rate: float = 0.01,
):
    """
    Resolve the dependencies of multiple tasks by analysing which tasks require concurrent.futures.Future
    objects from other tasks.

    Args:
        future_queue (Queue): Queue for receiving new tasks.
        executor_queue (Queue): Queue for the internal executor.
        executor (ExecutorBase): Executor to execute the tasks with after the dependencies are resolved.
        refresh_rate (float): Set the refresh rate in seconds, i.e. how frequently the input queue is checked.
    """
    wait_lst = []
    while True:
        try:
            task_dict = future_queue.get_nowait()
        except queue.Empty:
            task_dict = None
        if (  # shutdown the executor
            task_dict is not None
            and "shutdown" in task_dict.keys()
            and task_dict["shutdown"]
        ):
            executor.shutdown(wait=task_dict["wait"])
            future_queue.task_done()
            future_queue.join()
            break
        elif (  # handle function submitted to the executor
            task_dict is not None
            and "fn" in task_dict.keys()
            and "future" in task_dict.keys()
        ):
            future_lst, ready_flag = _get_future_objects_from_input(task_dict=task_dict)
            if len(future_lst) == 0 or ready_flag:
                # No future objects are used in the input or all future objects are already done
                task_dict["args"], task_dict["kwargs"] = _update_futures_in_input(
                    args=task_dict["args"], kwargs=task_dict["kwargs"]
                )
                executor_queue.put(task_dict)
            else:  # Otherwise add the function to the wait list
                task_dict["future_lst"] = future_lst  # not covered
                wait_lst.append(task_dict)  # not covered
            future_queue.task_done()
        elif len(wait_lst) > 0:
            # Check functions in the wait list and execute them if all future objects are now ready
            wait_lst = _submit_waiting_task(  # not covered
                wait_lst=wait_lst, executor_queue=executor_queue
            )
        else:
            # If there is nothing else to do, sleep for a moment
            sleep(refresh_rate)
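
# Illustrative sketch (not part of the module): with this dependency resolution, a
# Future returned by one submission can be passed directly as input to a later one;
# "exe" stands for a hypothetical executor whose input queue is drained by this
# function:
#
#     fut_sum = exe.submit(sum, [1, 2])
#     fut_double = exe.submit(lambda x: 2 * x, fut_sum)  # runs once fut_sum is done
#     assert fut_double.result() == 6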


def get_command_path(executable: str) -> str:
    """
    Get the path of the backend executable script.

    Args:
        executable (str): Name of the backend executable script, either mpiexec_interactive.py or serial_interactive.py

    Returns:
        str: absolute path to the executable script
    """
    return os.path.abspath(os.path.join(__file__, "..", "..", "backend", executable))


def _get_backend_path(cores: int) -> list:
    """
    Get the command to call the backend as a list of two strings.

    Args:
        cores (int): Number of cores used to execute the task; if it is greater than one, mpiexec_interactive.py is
                     used, otherwise serial_interactive.py

    Returns:
        list[str]: List of strings containing the python executable path and the backend script to execute
    """
    command_lst = [sys.executable]
    if cores > 1 and importlib.util.find_spec("mpi4py") is not None:
        command_lst += [get_command_path(executable="mpiexec_interactive.py")]
    elif cores > 1:
        raise ImportError(  # not covered
            "mpi4py is required for parallel calculations. Please install mpi4py."
        )
    else:
        command_lst += [get_command_path(executable="serial_interactive.py")]
    return command_lst
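
# Illustrative note (not part of the module): with mpi4py installed and cores=2,
# the returned command list looks like
#
#     [sys.executable, "/.../pympipool/backend/mpiexec_interactive.py"]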


def _get_command_path(executable: str) -> str:
    """
    Get the path of the backend executable script.

    Args:
        executable (str): Name of the backend executable script, either mpiexec_interactive.py or serial_interactive.py

    Returns:
        str: absolute path to the executable script
    """
    return os.path.abspath(os.path.join(__file__, "..", "..", "backend", executable))  # not covered (newly uncovered)


def _wait_for_free_slots(
    active_task_dict: dict, cores_requested: int, max_cores: int
) -> dict:
    """
    Wait for computing resources to become available.

    Args:
        active_task_dict (dict): Dictionary containing the future objects and the number of cores they require
        cores_requested (int): Number of cores required for executing the next task
        max_cores (int): Maximum number of cores which can be used

    Returns:
        dict: Dictionary containing the future objects and the number of cores they require
    """
    while sum(active_task_dict.values()) + cores_requested > max_cores:
        active_task_dict = {k: v for k, v in active_task_dict.items() if not k.done()}
    return active_task_dict
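
# Illustrative note (not part of the module): active_task_dict maps the Future of
# each running task to the number of cores it occupies, so with max_cores=4:
#
#     active_task_dict = {fut_a: 2, fut_b: 2}   # all 4 cores busy
#     # a request for 2 more cores busy-waits until fut_a or fut_b is done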


def _submit_waiting_task(wait_lst: List[dict], executor_queue: queue.Queue) -> list:
    """
    Submit the waiting tasks whose Future inputs have completed to the executor.

    Args:
        wait_lst (list): List of waiting tasks
        executor_queue (Queue): Queue of the internal executor

    Returns:
        list: list of tasks whose Future inputs have not yet completed
    """
    # note: the body of this function is not covered by the test suite
    wait_tmp_lst = []
    for task_wait_dict in wait_lst:
        if all([future.done() for future in task_wait_dict["future_lst"]]):
            del task_wait_dict["future_lst"]
            task_wait_dict["args"], task_wait_dict["kwargs"] = _update_futures_in_input(
                args=task_wait_dict["args"], kwargs=task_wait_dict["kwargs"]
            )
            executor_queue.put(task_wait_dict)
        else:
            wait_tmp_lst.append(task_wait_dict)
    return wait_tmp_lst


def _update_futures_in_input(args: tuple, kwargs: dict):
    """
    Evaluate Future objects in the arguments and keyword arguments by calling future.result()

    Args:
        args (tuple): function arguments
        kwargs (dict): function keyword arguments

    Returns:
        tuple, dict: arguments and keyword arguments with every Future object replaced by its result
    """
    args = [arg if not isinstance(arg, Future) else arg.result() for arg in args]
    kwargs = {
        key: value if not isinstance(value, Future) else value.result()
        for key, value in kwargs.items()
    }
    return args, kwargs
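
# Illustrative sketch (not part of the module):
#
#     done = Future()
#     done.set_result(3)
#     args, kwargs = _update_futures_in_input(args=(done, 4), kwargs={"x": done})
#     # args == [3, 4], kwargs == {"x": 3}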


def _get_future_objects_from_input(task_dict: dict):
    """
    Check whether the input parameters contain Future objects and which of these Future objects are already done.

    Args:
        task_dict (dict): task submitted to the executor as dictionary. This dictionary has the following keys
                          {"fn": callable, "args": (), "kwargs": {}, "resource_dict": {}}

    Returns:
        list, boolean: list of Future objects and a boolean flag indicating whether all of them are already done
    """
    future_lst = [arg for arg in task_dict["args"] if isinstance(arg, Future)] + [
        value for value in task_dict["kwargs"].values() if isinstance(value, Future)
    ]
    boolean_flag = len([future for future in future_lst if future.done()]) == len(
        future_lst
    )
    return future_lst, boolean_flag


def _submit_function_to_separate_process(
    task_dict: dict,
    active_task_dict: dict,
    qtask: queue.Queue,
    interface_class: BaseInterface,
    executor_kwargs: dict,
    max_cores: int,
    hostname_localhost: bool = False,
):
    """
    Submit a function to be executed in a separate Python process.

    Args:
        task_dict (dict): task submitted to the executor as dictionary. This dictionary has the following keys
                          {"fn": callable, "args": (), "kwargs": {}, "resource_dict": {}}
        active_task_dict (dict): Dictionary containing the future objects and the number of cores they require
        qtask (queue.Queue): Queue to communicate with the thread linked to the process executing the python function
        interface_class (BaseInterface): Interface to start process on selected compute resources
        executor_kwargs (dict): keyword parameters used to initialize the Executor
        max_cores (int): defines the number of cores which can be used in parallel
        hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                      context of an HPC cluster this is essential to be able to communicate with an
                                      Executor running on a different compute node within the same allocation. In
                                      principle any computer should be able to resolve that its own hostname points
                                      to the same address as localhost. Still, macOS >= 12 seems to disable this
                                      lookup for security reasons, so on macOS this option has to be set to true.

    Returns:
        RaisingThread, dict: thread for communicating with the python process which is executing the function and
                             dictionary containing the future objects and the number of cores they require
    """
    resource_dict = task_dict.pop("resource_dict")
    qtask.put(task_dict)
    qtask.put({"shutdown": True, "wait": True})
    if "cores" not in resource_dict.keys() or (
        resource_dict["cores"] == 1 and executor_kwargs["cores"] >= 1
    ):
        resource_dict["cores"] = executor_kwargs["cores"]
    active_task_dict = _wait_for_free_slots(
        active_task_dict=active_task_dict,
        cores_requested=resource_dict["cores"],
        max_cores=max_cores,
    )
    active_task_dict[task_dict["future"]] = resource_dict["cores"]
    task_kwargs = executor_kwargs.copy()
    task_kwargs.update(resource_dict)
    task_kwargs.update(
        {
            "future_queue": qtask,
            "interface_class": interface_class,
            "hostname_localhost": hostname_localhost,
            "init_function": None,
        }
    )
    process = RaisingThread(
        target=execute_parallel_tasks,
        kwargs=task_kwargs,
    )
    process.start()
    return process, active_task_dict