• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

chanzuckerberg / miniwdl / 9055707511

06 May 2024 03:05AM UTC coverage: 95.223% (+0.03%) from 95.191%
9055707511

push

github

mlin
update docker dependencies

7375 of 7745 relevant lines covered (95.22%)

0.95 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

93.31
/WDL/runtime/task_container.py
1
"""
2
Abstract interface for task container runtime
3
"""
4

5
import os
1✔
6
import logging
1✔
7
import shutil
1✔
8
import threading
1✔
9
import typing
1✔
10
from typing import Callable, Iterable, Any, Dict, Optional, ContextManager
1✔
11
from abc import ABC, abstractmethod
1✔
12
from contextlib import suppress
1✔
13
from .. import Error, Env, Value, Type
1✔
14
from .._util import (
1✔
15
    TerminationSignalFlag,
16
    path_really_within,
17
    rmtree_atomic,
18
    PygtailLogger,
19
    parse_byte_size,
20
)
21
from .._util import StructuredLogMessage as _
1✔
22
from . import config, _statusbar
1✔
23
from .error import OutputError, Terminated, CommandFailed
1✔
24

25

26
class TaskContainer(ABC):
    """
    Base class for task containers, subclassed by runtime-specific backends (e.g. Docker).
    """

    # class stuff
33
    @classmethod
    def global_init(cls, cfg: config.Loader, logger: logging.Logger) -> None:
        """
        Perform any necessary one-time initialization of the underlying container backend. To be
        invoked once per process prior to any instantiation of the class.

        :raises NotImplementedError: unless overridden by the backend subclass
        """
        raise NotImplementedError()

41
    @classmethod
    def detect_resource_limits(cls, cfg: config.Loader, logger: logging.Logger) -> Dict[str, int]:
        """
        Detect the maximum resources ("cpu" and "mem_bytes") that the underlying container backend
        will be able to provision for any one task.

        If determining this is at all costly, then backend should memoize (thread-safely and
        perhaps front-loaded in global_init).

        :raises NotImplementedError: unless overridden by the backend subclass
        """
        raise NotImplementedError()
51

52
    # instance stuff

    # identifier for this task run, assigned by the caller of __init__
    run_id: str

    host_dir: str
    """
    :type: str

    The run directory (on the host)
    """

    container_dir: str
    """
    :type: str

    The scratch directory inside the container. The task command's working directory will be
    ``{container_dir}/work/``.
    """

    input_path_map: Dict[str, str]
    """
    :type: Dict[str,str]

    A mapping of host input file/directory paths to in-container mounted paths, maintained by
    ``add_paths``. Directory paths are distinguished by trailing slashes on both keys and values;
    the slashes often should be trimmed for use elsewhere.
    """

    input_path_map_rev: Dict[str, str]
    """
    Inverse of ``input_path_map`` (also maintained by ``add_paths``)
    """

    try_counter: int
    """
    :type: int

    Counter for number of retries; starts at 1 on the first attempt. On subsequent attempts, the
    names (on the host) of the working directory, stdout.txt, and stderr.txt may incorporate the
    count, to ensure their uniqueness.
    """

    runtime_values: Dict[str, Any]
    """
    Evaluated task runtime{} section, to be populated by process_runtime(). Typically the
    TaskContainer backend needs to honor cpu, memory_limit, memory_reservation, docker, env.
    Retry logic (maxRetries, preemptible) is handled externally.
    """

    stderr_callback: Optional[Callable[[str], None]]
    """
    A function called line-by-line for the task's standard error stream, iff verbose logging is
    enabled. If provided by a plugin then it overrides the default standard error logging, which
    writes each line to the 'stderr' child of the task logger.
    """

    failure_info: Optional[Dict[str, Any]]
    """
    Upon run failure, the implementation may provide additional structured information about what
    went wrong (beyond the exit code and log messages).
    """

    # True only while _run() is executing; asserted False in add_paths() and run()
    _running: bool
116
    def __init__(self, cfg: config.Loader, run_id: str, host_dir: str) -> None:
        """Initialize container state and provision the host working directory."""
        # identity & configuration
        self.cfg = cfg
        self.run_id = run_id
        self.host_dir = host_dir
        self.container_dir = "/mnt/miniwdl_task_container"
        # mutable bookkeeping maintained across the run lifecycle
        self.try_counter = 1
        self.input_path_map = {}
        self.input_path_map_rev = {}
        self.runtime_values = {}
        self.stderr_callback = None
        self.failure_info = None
        self._running = False
        # create {host_dir}/work; intentionally errors if it already exists
        os.makedirs(self.host_work_dir())
129

130
    def add_paths(self, host_paths: Iterable[str]) -> None:
        """
        Use before running the container to add a list of host paths to mount inside the container
        as inputs. Directory paths should have a trailing slash. The host-to-container path mapping
        is maintained in ``input_path_map``.

        Although ``add_paths`` can be used multiple times, paths should be added together where
        possible, as this allows heuristics for dealing with any name collisions among them.

        :raises Error.InputError: if a given path doesn't exist on the host
        """
        assert not self._running

        # partition the files by host directory
        host_paths_by_dir = {}
        for host_path in host_paths:
            host_path_strip = host_path.rstrip("/")
            # skip paths already mapped (checking both with and without trailing slash)
            if host_path not in self.input_path_map and host_path_strip not in self.input_path_map:
                if not os.path.exists(host_path_strip):
                    raise Error.InputError("input path not found: " + host_path)
                host_paths_by_dir.setdefault(os.path.dirname(host_path_strip), set()).add(host_path)

        # for each such partition of files
        # - if there are no basename collisions under input subdirectory 0, then mount them there.
        # - otherwise, mount them in a fresh subdirectory
        for paths in host_paths_by_dir.values():
            based = os.path.join(self.container_dir, "work/_miniwdl_inputs")
            subd = "0"
            # first pass: detect any basename collision with previously-mapped paths
            for host_path in paths:
                container_path = os.path.join(based, subd, os.path.basename(host_path.rstrip("/")))
                if host_path.endswith("/"):
                    container_path += "/"
                if container_path in self.input_path_map_rev:
                    assert subd == "0"
                    # len(input_path_map)+1 yields a subdirectory name not used before
                    subd = str(len(self.input_path_map) + 1)
            # second pass: record the mappings under the chosen subdirectory
            for host_path in paths:
                container_path = os.path.join(based, subd, os.path.basename(host_path.rstrip("/")))
                if host_path.endswith("/"):
                    container_path += "/"
                assert container_path not in self.input_path_map_rev
                self.input_path_map[host_path] = container_path
                self.input_path_map_rev[container_path] = host_path
170

171
    def copy_input_files(self, logger: logging.Logger) -> None:
        """
        Copy the input files/directories registered via ``add_paths`` from their original host
        locations into the corresponding subdirectories of the host working directory, mirroring
        the in-container layout. May be unnecessary when the backend can bind-mount inputs from
        their original host paths. Invoked once per task run attempt.
        """
        for host_path, container_path in self.input_path_map.items():
            assert container_path.startswith(self.container_dir)
            relative = os.path.relpath(container_path.rstrip("/"), self.container_dir)
            host_copy_path = os.path.join(self.host_dir, relative)

            logger.info(_("copy host input file", input=host_path, copy=host_copy_path))
            os.makedirs(os.path.dirname(host_copy_path), exist_ok=True)
            if host_path.endswith("/"):
                # trailing slash marks a directory input
                shutil.copytree(host_path.rstrip("/"), host_copy_path, symlinks=False)
            else:
                shutil.copy(host_path, host_copy_path)
189

190
    def process_runtime(self, logger: logging.Logger, runtime_eval: Dict[str, Value.Base]) -> None:
        """
        Given the evaluated WDL expressions from the task runtime{} section, populate
        self.runtime_values with validated/postprocessed values that will be needed to configure
        the container properly.

        Subclasses may override this to process custom runtime entries (before or after invoking
        this base version).

        :param runtime_eval: evaluated runtime{} entries, keyed by entry name
        :raises Error.RuntimeError: on invalid memory, returnCodes, or gpu settings
        """

        ans = self.runtime_values

        if "inlineDockerfile" in runtime_eval:
            # join Array[String] into one newline-delimited Dockerfile text
            dockerfile = runtime_eval["inlineDockerfile"]
            if not isinstance(dockerfile, Value.Array):
                dockerfile = Value.Array(dockerfile.type, [dockerfile])
            dockerfile = "\n".join(elt.coerce(Type.String()).value for elt in dockerfile.value)
            ans["inlineDockerfile"] = dockerfile
        elif "docker" in runtime_eval or "container" in runtime_eval:
            # "container" takes precedence over "docker" when both are present
            docker_value = runtime_eval["container" if "container" in runtime_eval else "docker"]
            if isinstance(docker_value, Value.Array) and len(docker_value.value):
                # TODO: choose a preferred candidate
                docker_value = docker_value.value[0]
            ans["docker"] = docker_value.coerce(Type.String()).value
        if "docker_network" in runtime_eval:
            network_value = runtime_eval["docker_network"]
            ans["docker_network"] = network_value.coerce(Type.String()).value

        if (
            isinstance(runtime_eval.get("privileged", None), Value.Boolean)
            and runtime_eval["privileged"].value is True
        ):
            # privileged containers are gated behind an explicit configuration opt-in
            if self.cfg.get_bool("task_runtime", "allow_privileged"):
                ans["privileged"] = True
            else:
                logger.warning(
                    "runtime.privileged ignored; to enable, set configuration"
                    " [task_runtime] allow_privileged = true (security+portability warning)"
                )

        host_limits = self.detect_resource_limits(self.cfg, logger)
        if "cpu" in runtime_eval:
            cpu_value = runtime_eval["cpu"].coerce(Type.Int()).value
            assert isinstance(cpu_value, int)
            cpu_max = self.cfg["task_runtime"].get_int("cpu_max")
            if cpu_max == 0:
                # cpu_max == 0 means "use the detected host limit"; negative means unlimited
                cpu_max = host_limits["cpu"]
            cpu = max(1, cpu_value if cpu_value <= cpu_max or cpu_max < 0 else cpu_max)
            if cpu != cpu_value:
                logger.warning(
                    _("runtime.cpu adjusted to host limit", original=cpu_value, adjusted=cpu)
                )
            ans["cpu"] = cpu

        if "memory" in runtime_eval:
            memory_str = runtime_eval["memory"].coerce(Type.String()).value
            assert isinstance(memory_str, str)
            try:
                memory_bytes = parse_byte_size(memory_str)
            except ValueError:
                raise Error.RuntimeError("invalid setting of runtime.memory, " + memory_str)

            # memory_max semantics: "-1" = unlimited, 0 = detected host limit, else byte size
            memory_max = self.cfg["task_runtime"]["memory_max"].strip()
            memory_max = -1 if memory_max == "-1" else parse_byte_size(memory_max)
            if memory_max == 0:
                memory_max = host_limits["mem_bytes"]
            if memory_max > 0 and memory_bytes > memory_max:
                logger.warning(
                    _(
                        "runtime.memory adjusted to host limit",
                        original=memory_bytes,
                        adjusted=memory_max,
                    )
                )
                memory_bytes = memory_max
            ans["memory_reservation"] = memory_bytes

            # optionally set a hard memory limit as a multiple of the reservation
            memory_limit_multiplier = self.cfg["task_runtime"].get_float("memory_limit_multiplier")
            if memory_limit_multiplier > 0.0:
                ans["memory_limit"] = int(memory_limit_multiplier * memory_bytes)

        if "maxRetries" in runtime_eval:
            ans["maxRetries"] = max(0, runtime_eval["maxRetries"].coerce(Type.Int()).value)
        if "preemptible" in runtime_eval:
            ans["preemptible"] = max(0, runtime_eval["preemptible"].coerce(Type.Int()).value)
        if "returnCodes" in runtime_eval:
            # acceptable forms: "*" (any), a single Int, or an Array coercible to Array[Int]
            rcv = runtime_eval["returnCodes"]
            if isinstance(rcv, Value.String) and rcv.value == "*":
                ans["returnCodes"] = "*"
            elif isinstance(rcv, Value.Int):
                ans["returnCodes"] = rcv.value
            elif isinstance(rcv, Value.Array):
                try:
                    ans["returnCodes"] = [v.coerce(Type.Int()).value for v in rcv.value]
                except Exception:
                    # was a bare `except:` which would also swallow KeyboardInterrupt/SystemExit;
                    # coercion failure falls through to the uniform error below
                    pass
            if "returnCodes" not in ans:
                raise Error.RuntimeError("invalid setting of runtime.returnCodes")

        if "gpu" in runtime_eval:
            if not isinstance(runtime_eval["gpu"], Value.Boolean):
                raise Error.RuntimeError("invalid setting of runtime.gpu")
            ans["gpu"] = runtime_eval["gpu"].value
294

295
    def run(self, logger: logging.Logger, command: str) -> None:
        """
        Run the task command in the container:

        1. Container is instantiated with the configured mounts and resources
        2. The mounted directory and all subdirectories have u+rwx,g+rwx permission bits; all files
           within have u+rw,g+rw permission bits.
        3. Command is executed in host_work_dir() which is mounted to {container_dir}/work inside
           the container.
        4. Standard output is written to host_stdout_txt()
        5. Standard error is written to host_stderr_txt() and logged at VERBOSE level
        6. Raises CommandFailed for nonzero exit code
        7. Raises Terminated if TerminationSignalFlag detected, or Interrupted if the backend
           cancels on us for some reason that isn't our fault.

        The container is torn down in any case, including SIGTERM/SIGHUP signal which is trapped.
        """
        # container-specific logic should be in _run(); this wrapper traps signals
        assert not self._running
        if not command.strip():
            # empty command: nothing to run at all
            return

        preamble = self.cfg.get("task_runtime", "command_preamble")
        if preamble.strip():
            command = preamble + "\n" + command

        with TerminationSignalFlag(logger) as terminating:
            if terminating():
                raise Terminated(quiet=True)
            self._running = True
            try:
                exit_code = self._run(logger, terminating, command)
            finally:
                self._running = False

            if not self.success_exit_code(exit_code):
                if terminating():
                    raise Terminated()
                raise CommandFailed(
                    exit_code,
                    self.host_stderr_txt(),
                    self.host_stdout_txt(),
                    more_info=self.failure_info,
                )
337

338
    @abstractmethod
    def _run(self, logger: logging.Logger, terminating: Callable[[], bool], command: str) -> int:
        """
        Implementation-specific: run command in container & return exit status.

        Take care to write informative log messages for any backend-specific errors. Miniwdl's
        outer exception handler will only emit a brief, generic log message about the run failing.

        :param terminating: polling function; returns True once a termination signal has been
            received (see ``run``)
        :return: the command's exit status code
        """
        # run command in container & return exit status
        raise NotImplementedError()
348

349
    def success_exit_code(self, exit_code: int) -> bool:
        """
        True if the given exit status should be considered successful per the task's
        runtime.returnCodes setting (default: zero only; "*" accepts any status).
        """
        if "returnCodes" not in self.runtime_values:
            return exit_code == 0
        required = self.runtime_values["returnCodes"]
        if isinstance(required, str) and required == "*":
            return True
        acceptable = required if isinstance(required, list) else [required]
        return exit_code in acceptable
356

357
    def delete_work(self, logger: logging.Logger, delete_streams: bool = False) -> None:
        """
        After the container exits, delete all filesystem traces of it except for task.log. That
        includes successful output files!

        :param delete_streams: if True, also delete stdout.txt, stderr.txt, and the pygtail
            offset file
        """
        targets = [
            self.host_work_dir(),
            os.path.join(self.host_dir, "write_"),
            os.path.join(self.host_dir, "command"),
        ]
        if delete_streams:
            targets += [
                self.host_stdout_txt(),
                self.host_stderr_txt(),
                self.host_stderr_txt() + ".offset",
            ]
        deleted = []
        for target in targets:
            if os.path.isdir(target):
                rmtree_atomic(target)
                deleted.append(target)
            elif os.path.isfile(target):
                # tolerate the file disappearing between the check and the unlink
                with suppress(FileNotFoundError):
                    os.unlink(target)
                deleted.append(target)
        if deleted:
            logger.info(_("deleted task work artifacts", artifacts=deleted))
381

382
    def reset(self, logger: logging.Logger) -> None:
        """
        After a container/command failure, reset the working directory state so that
        copy_input_files() and run() can be retried.
        """
        # bumping try_counter changes host_work_dir()/host_stdout_txt()/host_stderr_txt(),
        # so the previous attempt's artifacts remain alongside the new attempt's
        self.try_counter += 1
        os.makedirs(self.host_work_dir())
389

390
    def host_path(self, container_path: str, inputs_only: bool = False) -> Optional[str]:
        """
        Map the in-container path of an output File/Directory to a host path under ``host_dir``.
        Directory paths should be given a trailing "/". Return None if the path does not exist.

        SECURITY: except for inputs, this method must only return host paths under ``host_dir``
        and prevent any reference to other host files (e.g. /etc/passwd), including via symlinks.

        :param inputs_only: restrict resolution to mounted inputs (and raise InputError otherwise)
        :raises Error.InputError: if inputs_only and the path is not a known input
        :raises OutputError: if the path escapes the working directory, or names an input
            mount point
        """
        if os.path.isabs(container_path):
            # handle output of std{out,err}.txt
            if container_path == os.path.join(self.container_dir, "stdout.txt"):
                return self.host_stdout_txt()
            if container_path == os.path.join(self.container_dir, "stderr.txt"):
                return self.host_stderr_txt()
            # handle output of an input File or Directory
            if container_path in self.input_path_map_rev:
                return self.input_path_map_rev[container_path]
            # handle output of a File or subDirectory found within an input Directory:
            # walk prefixes of the path from longest to shortest, looking for a mounted directory
            container_path_components = container_path.strip("/").split("/")
            for i in range(len(container_path_components) - 1, 5, -1):
                # 5 == len(['mnt', 'miniwdl_task_container', 'work', '_miniwdl_inputs', '0'])
                container_path_prefix = "/" + "/".join(container_path_components[:i]) + "/"
                if container_path_prefix in self.input_path_map_rev:
                    ans = self.input_path_map_rev[container_path_prefix]
                    ans += "/".join(container_path_components[i:])
                    if container_path.endswith("/"):
                        ans += "/"
                    # guard against symlink escape from the mounted input directory
                    assert path_really_within(ans, self.input_path_map_rev[container_path_prefix])
                    return ans
            if inputs_only:
                raise Error.InputError(
                    "task inputs attempted to use a non-input or non-existent path "
                    + container_path
                )
            # relativize the path to the provisioned working directory
            container_relpath = os.path.relpath(
                container_path, os.path.join(self.container_dir, "work")
            )
            if container_path.endswith("/") and not container_relpath.endswith("/"):
                container_relpath += "/"
            if container_relpath.startswith("../"):
                # see issue #214
                raise OutputError(
                    "task outputs attempted to use a path outside its working directory: "
                    + container_path
                )
            container_path = container_relpath

        ans = os.path.join(self.host_work_dir(), container_path)
        if container_path.endswith("/") and not ans.endswith("/"):
            ans += "/"
        # a trailing slash asserts the path is a directory; without one, a regular file
        if not (
            (container_path.endswith("/") and os.path.isdir(ans))
            or (not container_path.endswith("/") and os.path.isfile(ans))
        ):
            return None
        if not path_really_within(ans, self.host_work_dir()):
            # fail-safe guard against some weird symlink to host file
            raise OutputError(
                "task outputs attempted to use a path outside its working directory: "
                + container_path
            )
        if (
            ans.endswith("/")
            and self.input_path_map
            and (
                path_really_within(self.host_work_dir(), ans[:-1])
                or path_really_within(
                    ans[:-1], os.path.join(self.host_work_dir(), "_miniwdl_inputs")
                )
            )
        ):
            # prevent output of an input mount point
            raise OutputError("unusable output directory: " + container_path)
        return ans
465

466
    def host_work_dir(self):
        # per-attempt working directory: "work" on the first attempt, then "work2", "work3", ...
        suffix = self.try_counter if self.try_counter > 1 else ""
        return os.path.join(self.host_dir, f"work{suffix}")
470

471
    def host_stdout_txt(self):
        # per-attempt stdout capture: "stdout.txt" on the first attempt, then "stdout2.txt", ...
        suffix = self.try_counter if self.try_counter > 1 else ""
        return os.path.join(self.host_dir, f"stdout{suffix}.txt")
475

476
    def host_stderr_txt(self):
        # per-attempt stderr capture: "stderr.txt" on the first attempt, then "stderr2.txt", ...
        suffix = self.try_counter if self.try_counter > 1 else ""
        return os.path.join(self.host_dir, f"stderr{suffix}.txt")
480

481
    def touch_mount_point(self, host_path: str) -> None:
        """
        Implementation helper: touch a File or Directory mount point that might not already exist
        in the host directory. This ensures ownership by the invoking user:group.

        :param host_path: path under ``self.host_dir``; a trailing slash denotes a Directory
        :raises FileExistsError: if a File mount point already exists (open mode "x")
        """
        assert host_path.startswith(self.host_dir + "/")
        if host_path.endswith("/"):  # Directory mount point
            os.makedirs(host_path, exist_ok=True)
        else:  # File mount point
            os.makedirs(os.path.dirname(host_path), exist_ok=True)
            # create the empty file; previously bound the file object to `_`, shadowing the
            # module-level StructuredLogMessage alias within this scope
            with open(host_path, "x"):
                pass
493

494
    def poll_stderr_context(self, logger: logging.Logger) -> ContextManager[Callable[[], None]]:
        """
        Implementation helper: open a context yielding a function to poll stderr.txt and log each
        line (to either logger or self.stderr_callback if set). _run() implementation should
        call the function periodically while container is running, and close the context once
        done/failed.
        """
        # PygtailLogger tails the stderr file incrementally between polls
        return PygtailLogger(
            logger,
            self.host_stderr_txt(),
            callback=self.stderr_callback,
        )
506

507
    def task_running_context(self) -> ContextManager[None]:
        """
        Implementation helper: open a context which counts the task, and its CPU and memory
        reservations, in the CLI status bar's "running" ticker. _run() implementation should open
        this context once the container is truly running (not while e.g. still queued), and close
        it once done/failed.
        """
        # cpu/memory_reservation default to 0 when runtime{} didn't specify them
        return _statusbar.task_running(
            self.runtime_values.get("cpu", 0),
            self.runtime_values.get("memory_reservation", 0),
        )
518

519

520
# registry of available container backend classes (name -> class), populated lazily by new()
# from the "container_backend" plugin entry points
_backends: Dict[str, typing.Type[TaskContainer]] = dict()
# guards _backends population and per-backend global_init
_backends_lock: threading.Lock = threading.Lock()
522

523

524
def new(cfg: config.Loader, logger: logging.Logger, run_id: str, host_dir: str) -> TaskContainer:
    """
    Instantiate a TaskContainer from the configured backend, including any necessary global
    initialization.
    """
    with _backends_lock:
        # lazily discover backend plugins on first use
        if not _backends:
            for plugin_name, plugin_cls in config.load_plugins(cfg, "container_backend"):
                _backends[plugin_name] = plugin_cls  # pyre-fixme
        backend_cls = _backends[cfg["scheduler"]["container_backend"]]
        # run the backend's global_init() exactly once per process
        if not getattr(backend_cls, "_global_init", False):
            backend_cls.global_init(cfg, logger)
            backend_cls._global_init = True
        container = backend_cls(cfg, run_id, host_dir)
        assert isinstance(container, TaskContainer)
        return container
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc