• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

pantsbuild / pants / 21919838070

11 Feb 2026 07:27PM UTC coverage: 80.351% (+0.001%) from 80.35%
21919838070

Pull #23096

github

web-flow
Merge 9f45c9e39 into 9a67b81d3
Pull Request #23096: partially DRY out cache scope for test runners

8 of 15 new or added lines in 7 files covered. (53.33%)

1 existing line in 1 file now uncovered.

78767 of 98029 relevant lines covered (80.35%)

3.36 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

52.7
/src/python/pants/backend/python/goals/pytest_runner.py
1
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
2
# Licensed under the Apache License, Version 2.0 (see LICENSE).
3

4
from __future__ import annotations
9✔
5

6
import logging
9✔
7
import re
9✔
8
from abc import ABC, abstractmethod
9✔
9
from collections import defaultdict
9✔
10
from dataclasses import dataclass
9✔
11

12
from packaging.utils import canonicalize_name as canonicalize_project_name
9✔
13

14
from pants.backend.python.goals.coverage_py import (
9✔
15
    CoverageConfig,
16
    CoverageSubsystem,
17
    PytestCoverageData,
18
)
19
from pants.backend.python.subsystems import pytest
9✔
20
from pants.backend.python.subsystems.debugpy import DebugPy
9✔
21
from pants.backend.python.subsystems.pytest import PyTest, PythonTestFieldSet
9✔
22
from pants.backend.python.subsystems.python_tool_base import get_lockfile_metadata
9✔
23
from pants.backend.python.subsystems.setup import PythonSetup
9✔
24
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
9✔
25
from pants.backend.python.util_rules.local_dists import LocalDistsPexRequest, build_local_dists
9✔
26
from pants.backend.python.util_rules.lockfile_metadata import (
9✔
27
    PythonLockfileMetadataV2,
28
    PythonLockfileMetadataV3,
29
)
30
from pants.backend.python.util_rules.pex import (
9✔
31
    Pex,
32
    PexRequest,
33
    VenvPexProcess,
34
    create_pex,
35
    create_venv_pex,
36
    get_req_strings,
37
    setup_venv_pex_process,
38
)
39
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
9✔
40
from pants.backend.python.util_rules.pex_requirements import PexRequirements
9✔
41
from pants.backend.python.util_rules.python_sources import (
9✔
42
    PythonSourceFilesRequest,
43
    prepare_python_sources,
44
)
45
from pants.core.goals.test import (
9✔
46
    BuildPackageDependenciesRequest,
47
    RuntimePackageDependenciesField,
48
    TestDebugAdapterRequest,
49
    TestDebugRequest,
50
    TestExtraEnv,
51
    TestRequest,
52
    TestResult,
53
    TestSubsystem,
54
    build_runtime_package_dependencies,
55
)
56
from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
9✔
57
from pants.core.util_rules.config_files import find_config_file
9✔
58
from pants.core.util_rules.env_vars import environment_vars_subset
9✔
59
from pants.core.util_rules.partitions import Partition, PartitionerType, Partitions
9✔
60
from pants.core.util_rules.source_files import SourceFilesRequest, determine_source_files
9✔
61
from pants.engine.addresses import Address
9✔
62
from pants.engine.collection import Collection
9✔
63
from pants.engine.env_vars import EnvironmentVarsRequest
9✔
64
from pants.engine.environment import EnvironmentName
9✔
65
from pants.engine.fs import (
9✔
66
    EMPTY_DIGEST,
67
    CreateDigest,
68
    Digest,
69
    DigestContents,
70
    DigestSubset,
71
    Directory,
72
    MergeDigests,
73
    PathGlobs,
74
    RemovePrefix,
75
)
76
from pants.engine.internals.graph import resolve_target
9✔
77
from pants.engine.internals.graph import transitive_targets as transitive_targets_get
9✔
78
from pants.engine.intrinsics import (
9✔
79
    create_digest,
80
    digest_subset_to_digest,
81
    digest_to_snapshot,
82
    execute_process_with_retry,
83
    get_digest_contents,
84
    merge_digests,
85
)
86
from pants.engine.process import InteractiveProcess, Process, ProcessWithRetries
9✔
87
from pants.engine.rules import collect_rules, concurrently, implicitly, rule
9✔
88
from pants.engine.target import Target, TransitiveTargetsRequest, WrappedTargetRequest
9✔
89
from pants.engine.unions import UnionMembership, UnionRule, union
9✔
90
from pants.option.global_options import GlobalOptions
9✔
91
from pants.util.docutil import doc_url
9✔
92
from pants.util.frozendict import FrozenDict
9✔
93
from pants.util.logging import LogLevel
9✔
94
from pants.util.ordered_set import OrderedSet
9✔
95
from pants.util.pip_requirement import PipRequirement
9✔
96
from pants.util.strutil import softwrap
9✔
97

98
# Use a module-scoped logger so log records are attributed to this module
# (pants convention) rather than being emitted on the root logger.
logger = logging.getLogger(__name__)
99

100

101
# -----------------------------------------------------------------------------------------
102
# Plugin hook
103
# -----------------------------------------------------------------------------------------
104

105

106
@dataclass(frozen=True)
class PytestPluginSetup:
    """The result of custom set up logic before Pytest runs.

    Please reach out if you would like certain functionality, such as allowing your plugin to set
    environment variables.
    """

    # Extra files to merge into the pytest process's input digest.
    digest: Digest = EMPTY_DIGEST
    # Extra entries folded into `PEX_EXTRA_SYS_PATH` when the pytest process is set up.
    extra_sys_path: tuple[str, ...] = ()
116

117

118
@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class PytestPluginSetupRequest(ABC):
    """A request to set up the test environment before Pytest runs, e.g. to set up databases.

    To use, subclass PytestPluginSetupRequest, register the rule
    `UnionRule(PytestPluginSetupRequest, MyCustomPytestPluginSetupRequest)`, and add a rule that
    takes your subclass as a parameter and returns `PytestPluginSetup`.
    """

    # The test target this setup request was created for.
    target: Target

    @classmethod
    @abstractmethod
    def is_applicable(cls, target: Target) -> bool:
        """Whether the setup implementation should be used for this target or not."""
134

135

136
@rule(polymorphic=True)
async def get_pytest_plugin_setup(req: PytestPluginSetupRequest) -> PytestPluginSetup:
    """Polymorphic rule stub: the engine dispatches to the union member's concrete rule.

    This body is never executed directly.
    """
    raise NotImplementedError()
139

140

141
class AllPytestPluginSetups(Collection[PytestPluginSetup]):
    """The collected `PytestPluginSetup`s from every applicable plugin setup request."""

    pass
143

144

145
# TODO: Why is this necessary? We should be able to use `PythonTestFieldSet` as the rule param.
@dataclass(frozen=True)
class AllPytestPluginSetupsRequest:
    """Request to run all applicable `PytestPluginSetupRequest` rules for these addresses."""

    addresses: tuple[Address, ...]
149

150

151
@rule
async def run_all_setup_plugins(
    request: AllPytestPluginSetupsRequest, union_membership: UnionMembership
) -> AllPytestPluginSetups:
    """Run every registered, applicable `PytestPluginSetupRequest` for the given addresses."""
    wrapped_targets = await concurrently(
        resolve_target(
            WrappedTargetRequest(address, description_of_origin="<infallible>"), **implicitly()
        )
        for address in request.addresses
    )
    # Instantiate each registered setup-request type for every target it applies to.
    applicable_requests = []
    for request_type in union_membership.get(PytestPluginSetupRequest):
        for wrapped in wrapped_targets:
            if request_type.is_applicable(wrapped.target):
                applicable_requests.append(request_type(wrapped.target))  # type: ignore[abstract]
    plugin_setups = await concurrently(
        get_pytest_plugin_setup(**implicitly({setup_request: PytestPluginSetupRequest}))
        for setup_request in applicable_requests
    )
    return AllPytestPluginSetups(plugin_setups)
172

173

174
# -----------------------------------------------------------------------------------------
175
# Core logic
176
# -----------------------------------------------------------------------------------------
177

178

179
# If a user wants extra pytest output (e.g., plugin output) to show up in dist/
# they must ensure that output goes under this directory. E.g.,
# ./pants test <target> -- --html=extra-output/report.html
# (This relative directory in the process chroot is captured as an output directory below.)
_EXTRA_OUTPUT_DIR = "extra-output"
183

184

185
@dataclass(frozen=True)
class TestMetadata:
    """Batch key: every test target grouped into one `pytest` batch shares these values."""

    interpreter_constraints: InterpreterConstraints
    extra_env_vars: tuple[str, ...]
    xdist_concurrency: int | None
    resolve: str
    environment: str
    compatability_tag: str | None = None

    # Keep pytest's collector from treating this class as a test case.
    __test__ = False

    @property
    def description(self) -> str | None:
        """A human-readable partition description, or None when there is no compatibility tag."""
        # TODO: Put more info here.
        tag = self.compatability_tag
        return tag if tag else None
206

207

208
@dataclass(frozen=True)
class TestSetupRequest:
    """Request to build the pytest `Process` for a batch of test field sets."""

    # The tests that make up this batch.
    field_sets: tuple[PythonTestFieldSet, ...]
    # Batch-constant parameters (interpreter constraints, resolve, etc.).
    metadata: TestMetadata
    # True when the process will run interactively (`test --debug` / `--debug-adapter`).
    is_debug: bool
    # Additional environment variables for the process.
    extra_env: FrozenDict[str, str] = FrozenDict()
    # Arguments placed before pytest's own args (used by the debugpy wrapper).
    prepend_argv: tuple[str, ...] = ()
    # Extra PEXes appended to the runner's pex_path (used by the debugpy wrapper).
    additional_pexes: tuple[Pex, ...] = ()
216

217

218
@dataclass(frozen=True)
class TestSetup:
    """A fully-configured pytest process, plus the JUnit XML file name it will write (if any)."""

    process: Process
    # Name of the `--junit-xml` results file, or None when running in debug mode.
    results_file_name: str | None

    # Prevent this class from being detected by pytest as a test class.
    __test__ = False
225

226

227
_TEST_PATTERN = re.compile(b"def\\s+test_")
9✔
228

229

230
def _count_pytest_tests(contents: DigestContents) -> int:
9✔
231
    return sum(len(_TEST_PATTERN.findall(file.content)) for file in contents)
1✔
232

233

234
async def validate_pytest_cov_included(_pytest: PyTest):
    """Raise `ValueError` unless `pytest-cov` is part of the resolve pytest installs from."""
    if _pytest.requirements:
        # Only this subset of the lockfile will be installed: inspect those requirement strings.
        strings = (await get_req_strings(PexRequirements(_pytest.requirements))).req_strings
        resolved = {PipRequirement.parse(req_string) for req_string in strings}
    else:
        # The entire lockfile will be installed: inspect its recorded requirements.
        metadata = await get_lockfile_metadata(_pytest)
        if not isinstance(metadata, (PythonLockfileMetadataV2, PythonLockfileMetadataV3)):
            return
        resolved = metadata.requirements

    if not any(canonicalize_project_name(req.name) == "pytest-cov" for req in resolved):
        raise ValueError(
            softwrap(
                f"""\
                You set `[test].use_coverage`, but the custom resolve
                `{_pytest.install_from_resolve}` used to install pytest is missing
                `pytest-cov`, which is needed to collect coverage data.

                See {doc_url("docs/python/goals/test#pytest-version-and-plugins")} for details
                on how to set up a custom resolve for use by pytest.
                """
            )
        )
258

259

260
@rule(level=LogLevel.DEBUG)
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
) -> TestSetup:
    """Build the `Process` that runs pytest for a batch of test field sets.

    Assembles the pytest runner PEX (pytest + requirements + local dists), the batch's source
    and config files, coverage configuration, and plugin-provided digests into a single venv
    process, returning it along with the JUnit XML results file name (None in debug mode).
    """
    addresses = tuple(field_set.address for field_set in request.field_sets)

    # Resolve transitive deps and run all plugin setup hooks concurrently.
    transitive_targets, plugin_setups = await concurrently(
        transitive_targets_get(TransitiveTargetsRequest(addresses), **implicitly()),
        run_all_setup_plugins(AllPytestPluginSetupsRequest(addresses), **implicitly()),
    )
    all_targets = transitive_targets.closure

    interpreter_constraints = request.metadata.interpreter_constraints

    requirements_pex_get = create_pex(**implicitly(RequirementsPexRequest(addresses)))
    pytest_pex_get = create_pex(
        pytest.to_pex_request(interpreter_constraints=interpreter_constraints)
    )

    # Ensure that the empty extra output dir exists.
    extra_output_directory_digest_get = create_digest(CreateDigest([Directory(_EXTRA_OUTPUT_DIR)]))

    prepared_sources_get = prepare_python_sources(
        PythonSourceFilesRequest(all_targets, include_files=True), **implicitly()
    )

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_get = determine_source_files(
        SourceFilesRequest([field_set.source for field_set in request.field_sets])
    )

    field_set_extra_env_get = environment_vars_subset(
        EnvironmentVarsRequest(request.metadata.extra_env_vars), **implicitly()
    )

    (
        pytest_pex,
        requirements_pex,
        prepared_sources,
        field_set_source_files,
        field_set_extra_env,
        extra_output_directory_digest,
    ) = await concurrently(
        pytest_pex_get,
        requirements_pex_get,
        prepared_sources_get,
        field_set_source_files_get,
        field_set_extra_env_get,
        extra_output_directory_digest_get,
    )

    local_dists = await build_local_dists(
        LocalDistsPexRequest(
            addresses,
            interpreter_constraints=interpreter_constraints,
            sources=prepared_sources,
        )
    )

    pytest_runner_pex_get = create_venv_pex(
        **implicitly(
            PexRequest(
                output_filename="pytest_runner.pex",
                interpreter_constraints=interpreter_constraints,
                main=pytest.main,
                internal_only=True,
                pex_path=[pytest_pex, requirements_pex, local_dists.pex, *request.additional_pexes],
            )
        )
    )
    config_files_get = find_config_file(pytest.config_request(field_set_source_files.snapshot.dirs))
    pytest_runner_pex, config_files = await concurrently(pytest_runner_pex_get, config_files_get)

    # The coverage and pytest config may live in the same config file (e.g., setup.cfg, tox.ini
    # or pyproject.toml), and we may have rewritten those files to augment the coverage config,
    # in which case we must ensure that the original and rewritten files don't collide.
    pytest_config_digest = config_files.snapshot.digest
    if coverage_config.path in config_files.snapshot.files:
        subset_paths = list(config_files.snapshot.files)
        # Remove the original file, and rely on the rewritten file, which contains all the
        # pytest-related config unchanged.
        subset_paths.remove(coverage_config.path)
        pytest_config_digest = await digest_subset_to_digest(
            DigestSubset(pytest_config_digest, PathGlobs(subset_paths))
        )

    input_digest = await merge_digests(
        MergeDigests(
            (
                coverage_config.digest,
                local_dists.remaining_sources.source_files.snapshot.digest,
                pytest_config_digest,
                extra_output_directory_digest,
                *(plugin_setup.digest for plugin_setup in plugin_setups),
            )
        )
    )

    # Don't forget to keep "Customize Pytest command line options per target" section in
    # docs/markdown/Python/python-goals/python-test-goal.md up to date when changing
    # which flags are added to `pytest_args`.
    pytest_args = [
        # Always include colors and strip them out for display below (if required), for better cache
        # hit rates
        "--color=yes"
    ]
    output_files = []

    results_file_name = None
    if not request.is_debug:
        results_file_prefix = request.field_sets[0].address.path_safe_spec
        if len(request.field_sets) > 1:
            results_file_prefix = (
                f"batch-of-{results_file_prefix}+{len(request.field_sets) - 1}-files"
            )
        results_file_name = f"{results_file_prefix}.xml"
        pytest_args.extend(
            (f"--junit-xml={results_file_name}", "-o", f"junit_family={pytest.junit_family}")
        )
        output_files.append(results_file_name)

    if test_subsystem.use_coverage and not request.is_debug:
        await validate_pytest_cov_included(pytest)
        output_files.append(".coverage")

        if coverage_subsystem.filter:
            cov_args = [f"--cov={morf}" for morf in coverage_subsystem.filter]
        else:
            # N.B.: Passing `--cov=` or `--cov=.` to communicate "record coverage for all sources"
            # fails in certain contexts as detailed in:
            #   https://github.com/pantsbuild/pants/issues/12390
            # Instead we focus coverage on just the directories containing python source files
            # materialized to the Process chroot.
            cov_args = [f"--cov={source_root}" for source_root in prepared_sources.source_roots]

        pytest_args.extend(
            (
                "--cov-report=",  # Turn off output.
                f"--cov-config={coverage_config.path}",
                *cov_args,
            )
        )

    extra_sys_path = OrderedSet(
        (
            *prepared_sources.source_roots,
            *(entry for plugin_setup in plugin_setups for entry in plugin_setup.extra_sys_path),
        )
    )
    extra_env = {
        "PEX_EXTRA_SYS_PATH": ":".join(extra_sys_path),
        **request.extra_env,
        **test_extra_env.env,
        # NOTE: field_set_extra_env intentionally after `test_extra_env` to allow overriding within
        # `python_tests`.
        **field_set_extra_env,
    }

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = test_subsystem.default_process_cache_scope

    # Concurrency passed to pytest-xdist via `-n`; 0 disables xdist entirely.
    xdist_concurrency = 0
    if pytest.xdist_enabled and not request.is_debug:
        concurrency = request.metadata.xdist_concurrency
        if concurrency is None:
            # No explicit concurrency configured: estimate it by counting test functions.
            contents = await get_digest_contents(field_set_source_files.snapshot.digest)
            concurrency = _count_pytest_tests(contents)
        xdist_concurrency = concurrency

    # The batch's timeout is the sum of the individual targets' timeouts (when any are set).
    timeout_seconds: int | None = None
    for field_set in request.field_sets:
        timeout = field_set.timeout.calculate_from_global_options(test_subsystem, pytest)
        if timeout:
            if timeout_seconds:
                timeout_seconds += timeout
            else:
                timeout_seconds = timeout

    run_description = request.field_sets[0].address.spec
    if len(request.field_sets) > 1:
        run_description = (
            f"batch of {run_description} and {len(request.field_sets) - 1} other files"
        )
    process = await setup_venv_pex_process(
        VenvPexProcess(
            pytest_runner_pex,
            argv=(
                *request.prepend_argv,
                *pytest.args,
                *(("-c", pytest.config) if pytest.config else ()),
                *(("-n", "{pants_concurrency}") if xdist_concurrency else ()),
                # N.B.: Now that we're using command-line options instead of the PYTEST_ADDOPTS
                # environment variable, it's critical that `pytest_args` comes after `pytest.args`.
                *pytest_args,
                *field_set_source_files.files,
            ),
            extra_env=extra_env,
            input_digest=input_digest,
            output_directories=(_EXTRA_OUTPUT_DIR,),
            output_files=output_files,
            timeout_seconds=timeout_seconds,
            execution_slot_variable=pytest.execution_slot_var,
            concurrency_available=xdist_concurrency,
            description=f"Run Pytest for {run_description}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
        **implicitly(),
    )
    return TestSetup(process, results_file_name=results_file_name)
476

477

478
class PyTestRequest(TestRequest):
    """`TestRequest` implementation that wires pytest into the `test` goal."""

    tool_subsystem = PyTest  # type: ignore[assignment]
    field_set_type = PythonTestFieldSet
    # Partitioning is implemented by `partition_python_tests` below.
    partitioner_type = PartitionerType.CUSTOM
    supports_debug = True
    supports_debug_adapter = True
484

485

486
@rule(desc="Partition Pytest", level=LogLevel.DEBUG)
async def partition_python_tests(
    request: PyTestRequest.PartitionRequest[PythonTestFieldSet],
    python_setup: PythonSetup,
) -> Partitions[PythonTestFieldSet, TestMetadata]:
    """Split the test field sets into partitions keyed by their shared `TestMetadata`."""
    all_partitions = []
    batched_by_metadata = defaultdict(list)

    for field_set in request.field_sets:
        metadata = TestMetadata(
            interpreter_constraints=InterpreterConstraints.create_from_field_sets(
                [field_set], python_setup
            ),
            extra_env_vars=field_set.extra_env_vars.sorted(),
            xdist_concurrency=field_set.xdist_concurrency.value,
            resolve=field_set.resolve.normalized_value(python_setup),
            environment=field_set.environment.value,
            compatability_tag=field_set.batch_compatibility_tag.value,
        )

        if metadata.compatability_tag:
            # Tagged tests batch together with others sharing identical metadata.
            batched_by_metadata[metadata].append(field_set)
        else:
            # Untagged tests are assumed to be incompatible with all others: run alone.
            all_partitions.append(Partition((field_set,), metadata))

    for metadata, members in batched_by_metadata.items():
        all_partitions.append(Partition(tuple(members), metadata))

    return Partitions(all_partitions)
517

518

519
@rule(desc="Run Pytest", level=LogLevel.DEBUG)
async def run_python_tests(
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    global_options: GlobalOptions,
) -> TestResult:
    """Execute the pytest process for one batch and convert the outcome into a `TestResult`.

    Retries the process up to `[test].attempts_default` times, then extracts coverage data,
    JUnit XML results, and extra output from the final attempt's output digest.
    """
    setup = await setup_pytest_for_target(
        TestSetupRequest(batch.elements, batch.partition_metadata, is_debug=False), **implicitly()
    )

    results = await execute_process_with_retry(
        ProcessWithRetries(setup.process, test_subsystem.attempts_default)
    )
    last_result = results.last

    def warning_description() -> str:
        # Human-readable label for this batch, used in warning messages below.
        description = batch.elements[0].address.spec
        if len(batch.elements) > 1:
            description = (
                f"batch containing {description} and {len(batch.elements) - 1} other files"
            )
        if batch.partition_metadata.description:
            description = f"{description} ({batch.partition_metadata.description})"
        return description

    coverage_data = None
    if test_subsystem.use_coverage:
        coverage_snapshot = await digest_to_snapshot(
            **implicitly(DigestSubset(last_result.output_digest, PathGlobs([".coverage"])))
        )
        if coverage_snapshot.files == (".coverage",):
            coverage_data = PytestCoverageData(
                tuple(field_set.address for field_set in batch.elements), coverage_snapshot.digest
            )
        else:
            logger.warning(f"Failed to generate coverage data for {warning_description()}.")

    xml_results_snapshot = None
    if setup.results_file_name:
        xml_results_snapshot = await digest_to_snapshot(
            **implicitly(
                DigestSubset(last_result.output_digest, PathGlobs([setup.results_file_name]))
            )
        )
        if xml_results_snapshot.files != (setup.results_file_name,):
            logger.warning(f"Failed to generate JUnit XML data for {warning_description()}.")
    extra_output_snapshot = await digest_to_snapshot(
        **implicitly(
            DigestSubset(last_result.output_digest, PathGlobs([f"{_EXTRA_OUTPUT_DIR}/**"]))
        )
    )
    extra_output_snapshot = await digest_to_snapshot(
        **implicitly(RemovePrefix(extra_output_snapshot.digest, _EXTRA_OUTPUT_DIR))
    )

    # Exit code 5 is pytest's "no tests were collected" status.
    if last_result.exit_code == 5 and pytest.allow_empty_test_collection:
        return TestResult.no_tests_found_in_batch(batch, test_subsystem.output)

    return TestResult.from_batched_fallible_process_result(
        results.results,
        batch=batch,
        output_setting=test_subsystem.output,
        coverage_data=coverage_data,
        xml_results=xml_results_snapshot,
        extra_output=extra_output_snapshot,
        output_simplifier=global_options.output_simplifier(),
    )
587

588

589
@rule(desc="Set up Pytest to run interactively", level=LogLevel.DEBUG)
async def debug_python_test(
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
) -> TestDebugRequest:
    """Build an interactive (foreground) pytest process for `test --debug`."""
    setup_request = TestSetupRequest(batch.elements, batch.partition_metadata, is_debug=True)
    setup = await setup_pytest_for_target(setup_request, **implicitly())
    interactive_process = InteractiveProcess.from_process(
        setup.process, forward_signals_to_process=False, restartable=True
    )
    return TestDebugRequest(interactive_process)
601

602

603
@rule(desc="Set up debugpy to run an interactive Pytest session", level=LogLevel.DEBUG)
async def debugpy_python_test(
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
    debugpy: DebugPy,
    debug_adapter: DebugAdapterSubsystem,
    python_setup: PythonSetup,
) -> TestDebugAdapterRequest:
    """Build an interactive pytest process wrapped in debugpy for `test --debug-adapter`."""
    constraints = InterpreterConstraints.create_from_field_sets(batch.elements, python_setup)
    debugpy_pex = await create_pex(debugpy.to_pex_request(interpreter_constraints=constraints))

    setup_request = TestSetupRequest(
        batch.elements,
        batch.partition_metadata,
        is_debug=True,
        prepend_argv=debugpy.get_args(debug_adapter),
        extra_env=FrozenDict(PEX_MODULE="debugpy"),
        additional_pexes=(debugpy_pex,),
    )
    setup = await setup_pytest_for_target(setup_request, **implicitly())
    interactive_process = InteractiveProcess.from_process(
        setup.process, forward_signals_to_process=False, restartable=True
    )
    return TestDebugAdapterRequest(interactive_process)
634

635

636
# -----------------------------------------------------------------------------------------
637
# `runtime_package_dependencies` plugin
638
# -----------------------------------------------------------------------------------------
639

640

641
@dataclass(frozen=True)
class RuntimePackagesPluginRequest(PytestPluginSetupRequest):
    """Plugin setup request for targets that declare `runtime_package_dependencies`."""

    @classmethod
    def is_applicable(cls, target: Target) -> bool:
        deps = target.get(RuntimePackageDependenciesField).value
        return bool(deps)
646

647

648
@rule
async def setup_runtime_packages(request: RuntimePackagesPluginRequest) -> PytestPluginSetup:
    """Build the target's `runtime_package_dependencies` and merge their digests."""
    deps_field = request.target.get(RuntimePackageDependenciesField)
    built_packages = await build_runtime_package_dependencies(
        BuildPackageDependenciesRequest(deps_field)
    )
    package_digests = [pkg.digest for pkg in built_packages]
    merged_digest = await merge_digests(MergeDigests(package_digests))
    return PytestPluginSetup(merged_digest)
655

656

657
def rules():
    """Return the rules and union registrations for the pytest backend."""
    result = [
        *collect_rules(),
        *pytest.rules(),
    ]
    result.append(UnionRule(PytestPluginSetupRequest, RuntimePackagesPluginRequest))
    result.extend(PyTestRequest.rules())
    return result
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc