
pantsbuild / pants — Coveralls build 19015773527

02 Nov 2025 05:33PM UTC coverage: 17.872% (-62.4%) from 80.3%

Pull Request #22816: Update Pants internal Python to 3.14
Merge a12d75757 into 6c024e162 (via GitHub, committer web-flow)

4 of 5 new or added lines in 3 files covered (80.0%).
28452 existing lines in 683 files now uncovered.
9831 of 55007 relevant lines covered (17.87%).
0.18 hits per line.

Source File: /src/python/pants/backend/python/goals/pytest_runner.py (coverage: 0.0%)
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

import logging
import re
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass

from packaging.utils import canonicalize_name as canonicalize_project_name

from pants.backend.python.goals.coverage_py import (
    CoverageConfig,
    CoverageSubsystem,
    PytestCoverageData,
)
from pants.backend.python.subsystems import pytest
from pants.backend.python.subsystems.debugpy import DebugPy
from pants.backend.python.subsystems.pytest import PyTest, PythonTestFieldSet
from pants.backend.python.subsystems.python_tool_base import get_lockfile_metadata
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists import LocalDistsPexRequest, build_local_dists
from pants.backend.python.util_rules.lockfile_metadata import (
    PythonLockfileMetadataV2,
    PythonLockfileMetadataV3,
)
from pants.backend.python.util_rules.pex import (
    Pex,
    PexRequest,
    VenvPexProcess,
    create_pex,
    create_venv_pex,
    get_req_strings,
    setup_venv_pex_process,
)
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
from pants.backend.python.util_rules.pex_requirements import PexRequirements
from pants.backend.python.util_rules.python_sources import (
    PythonSourceFilesRequest,
    prepare_python_sources,
)
from pants.core.goals.test import (
    BuildPackageDependenciesRequest,
    RuntimePackageDependenciesField,
    TestDebugAdapterRequest,
    TestDebugRequest,
    TestExtraEnv,
    TestRequest,
    TestResult,
    TestSubsystem,
    build_runtime_package_dependencies,
)
from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
from pants.core.util_rules.config_files import find_config_file
from pants.core.util_rules.env_vars import environment_vars_subset
from pants.core.util_rules.partitions import Partition, PartitionerType, Partitions
from pants.core.util_rules.source_files import SourceFilesRequest, determine_source_files
from pants.engine.addresses import Address
from pants.engine.collection import Collection
from pants.engine.env_vars import EnvironmentVarsRequest
from pants.engine.environment import EnvironmentName
from pants.engine.fs import (
    EMPTY_DIGEST,
    CreateDigest,
    Digest,
    DigestContents,
    DigestSubset,
    Directory,
    MergeDigests,
    PathGlobs,
    RemovePrefix,
)
from pants.engine.internals.graph import resolve_target
from pants.engine.internals.graph import transitive_targets as transitive_targets_get
from pants.engine.intrinsics import (
    create_digest,
    digest_subset_to_digest,
    digest_to_snapshot,
    execute_process_with_retry,
    get_digest_contents,
    merge_digests,
)
from pants.engine.process import InteractiveProcess, Process, ProcessCacheScope, ProcessWithRetries
from pants.engine.rules import collect_rules, concurrently, implicitly, rule
from pants.engine.target import Target, TransitiveTargetsRequest, WrappedTargetRequest
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.option.global_options import GlobalOptions
from pants.util.docutil import doc_url
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.ordered_set import OrderedSet
from pants.util.pip_requirement import PipRequirement
from pants.util.strutil import softwrap

logger = logging.getLogger()


# -----------------------------------------------------------------------------------------
# Plugin hook
# -----------------------------------------------------------------------------------------


@dataclass(frozen=True)
class PytestPluginSetup:
    """The result of custom setup logic before Pytest runs.

    Please reach out if you would like certain functionality, such as allowing your plugin to set
    environment variables.
    """

    digest: Digest = EMPTY_DIGEST
    extra_sys_path: tuple[str, ...] = ()


@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class PytestPluginSetupRequest(ABC):
    """A request to set up the test environment before Pytest runs, e.g. to set up databases.

    To use, subclass PytestPluginSetupRequest, register the rule
    `UnionRule(PytestPluginSetupRequest, MyCustomPytestPluginSetupRequest)`, and add a rule that
    takes your subclass as a parameter and returns `PytestPluginSetup`.
    """

    target: Target

    @classmethod
    @abstractmethod
    def is_applicable(cls, target: Target) -> bool:
        """Whether the setup implementation should be used for this target or not."""


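# NOTE (editor's illustration, not part of the original module): a minimal sketch of the plugin
# hook described in the docstring above, assuming a hypothetical plugin that materializes a
# fixture file before Pytest runs. `FixtureSetupRequest`, `setup_fixture`, and `db_fixture.json`
# are invented names, and `FileContent` would additionally need to be imported from
# `pants.engine.fs`.
#
#     @dataclass(frozen=True)
#     class FixtureSetupRequest(PytestPluginSetupRequest):
#         @classmethod
#         def is_applicable(cls, target: Target) -> bool:
#             # Only run this setup for targets that declare runtime package dependencies.
#             return target.has_field(RuntimePackageDependenciesField)
#
#
#     @rule
#     async def setup_fixture(request: FixtureSetupRequest) -> PytestPluginSetup:
#         # Materialize a file into the test sandbox and return it via the plugin digest.
#         digest = await create_digest(CreateDigest([FileContent("db_fixture.json", b"{}")]))
#         return PytestPluginSetup(digest=digest)
#
#
#     # Register the union member so `run_all_setup_plugins` discovers it:
#     def rules():
#         return [*collect_rules(), UnionRule(PytestPluginSetupRequest, FixtureSetupRequest)]

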
@rule(polymorphic=True)
async def get_pytest_plugin_setup(req: PytestPluginSetupRequest) -> PytestPluginSetup:
    raise NotImplementedError()


class AllPytestPluginSetups(Collection[PytestPluginSetup]):
    pass


# TODO: Why is this necessary? We should be able to use `PythonTestFieldSet` as the rule param.
@dataclass(frozen=True)
class AllPytestPluginSetupsRequest:
    addresses: tuple[Address, ...]


@rule
async def run_all_setup_plugins(
    request: AllPytestPluginSetupsRequest, union_membership: UnionMembership
) -> AllPytestPluginSetups:
    wrapped_tgts = await concurrently(
        resolve_target(
            WrappedTargetRequest(address, description_of_origin="<infallible>"), **implicitly()
        )
        for address in request.addresses
    )
    setup_requests = [
        request_type(wrapped_tgt.target)  # type: ignore[abstract]
        for request_type in union_membership.get(PytestPluginSetupRequest)
        for wrapped_tgt in wrapped_tgts
        if request_type.is_applicable(wrapped_tgt.target)
    ]
    setups = await concurrently(
        get_pytest_plugin_setup(**implicitly({request: PytestPluginSetupRequest}))
        for request in setup_requests
    )
    return AllPytestPluginSetups(setups)


# -----------------------------------------------------------------------------------------
# Core logic
# -----------------------------------------------------------------------------------------


# If a user wants extra pytest output (e.g., plugin output) to show up in dist/
# they must ensure that output goes under this directory. E.g.,
# ./pants test <target> -- --html=extra-output/report.html
_EXTRA_OUTPUT_DIR = "extra-output"


@dataclass(frozen=True)
class TestMetadata:
    """Parameters that must be constant for all test targets in a `pytest` batch."""

    interpreter_constraints: InterpreterConstraints
    extra_env_vars: tuple[str, ...]
    xdist_concurrency: int | None
    resolve: str
    environment: str
    compatability_tag: str | None = None

    # Prevent this class from being detected by pytest as a test class.
    __test__ = False

    @property
    def description(self) -> str | None:
        if not self.compatability_tag:
            return None

        # TODO: Put more info here.
        return self.compatability_tag


@dataclass(frozen=True)
class TestSetupRequest:
    field_sets: tuple[PythonTestFieldSet, ...]
    metadata: TestMetadata
    is_debug: bool
    extra_env: FrozenDict[str, str] = FrozenDict()
    prepend_argv: tuple[str, ...] = ()
    additional_pexes: tuple[Pex, ...] = ()


@dataclass(frozen=True)
class TestSetup:
    process: Process
    results_file_name: str | None

    # Prevent this class from being detected by pytest as a test class.
    __test__ = False


_TEST_PATTERN = re.compile(b"def\\s+test_")


def _count_pytest_tests(contents: DigestContents) -> int:
    return sum(len(_TEST_PATTERN.findall(file.content)) for file in contents)


async def validate_pytest_cov_included(_pytest: PyTest):
    if _pytest.requirements:
        # We'll only be using this subset of the lockfile.
        req_strings = (await get_req_strings(PexRequirements(_pytest.requirements))).req_strings
        requirements = {PipRequirement.parse(req_string) for req_string in req_strings}
    else:
        # We'll be using the entire lockfile.
        lockfile_metadata = await get_lockfile_metadata(_pytest)
        if not isinstance(lockfile_metadata, (PythonLockfileMetadataV2, PythonLockfileMetadataV3)):
            return
        requirements = lockfile_metadata.requirements
    if not any(canonicalize_project_name(req.name) == "pytest-cov" for req in requirements):
        raise ValueError(
            softwrap(
                f"""\
                You set `[test].use_coverage`, but the custom resolve
                `{_pytest.install_from_resolve}` used to install pytest is missing
                `pytest-cov`, which is needed to collect coverage data.

                See {doc_url("docs/python/goals/test#pytest-version-and-plugins")} for details
                on how to set up a custom resolve for use by pytest.
                """
            )
        )


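# NOTE (editor's illustration, not part of the original module): a hedged pants.toml sketch of
# the configuration the check above guards. The resolve name "pytest" and the lockfile path are
# assumptions; the point is that the resolve named by `[pytest].install_from_resolve` must itself
# provide `pytest-cov` whenever `[test].use_coverage` is enabled.
#
#     # pants.toml
#     [python]
#     enable_resolves = true
#
#     [python.resolves]
#     pytest = "3rdparty/python/pytest.lock"
#
#     [pytest]
#     install_from_resolve = "pytest"
#
#     [test]
#     use_coverage = true

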
@rule(level=LogLevel.DEBUG)
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
) -> TestSetup:
    addresses = tuple(field_set.address for field_set in request.field_sets)

    transitive_targets, plugin_setups = await concurrently(
        transitive_targets_get(TransitiveTargetsRequest(addresses), **implicitly()),
        run_all_setup_plugins(AllPytestPluginSetupsRequest(addresses), **implicitly()),
    )
    all_targets = transitive_targets.closure

    interpreter_constraints = request.metadata.interpreter_constraints

    requirements_pex_get = create_pex(**implicitly(RequirementsPexRequest(addresses)))
    pytest_pex_get = create_pex(
        pytest.to_pex_request(interpreter_constraints=interpreter_constraints)
    )

    # Ensure that the empty extra output dir exists.
    extra_output_directory_digest_get = create_digest(CreateDigest([Directory(_EXTRA_OUTPUT_DIR)]))

    prepared_sources_get = prepare_python_sources(
        PythonSourceFilesRequest(all_targets, include_files=True), **implicitly()
    )

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_get = determine_source_files(
        SourceFilesRequest([field_set.source for field_set in request.field_sets])
    )

    field_set_extra_env_get = environment_vars_subset(
        EnvironmentVarsRequest(request.metadata.extra_env_vars), **implicitly()
    )

    (
        pytest_pex,
        requirements_pex,
        prepared_sources,
        field_set_source_files,
        field_set_extra_env,
        extra_output_directory_digest,
    ) = await concurrently(
        pytest_pex_get,
        requirements_pex_get,
        prepared_sources_get,
        field_set_source_files_get,
        field_set_extra_env_get,
        extra_output_directory_digest_get,
    )

    local_dists = await build_local_dists(
        LocalDistsPexRequest(
            addresses,
            interpreter_constraints=interpreter_constraints,
            sources=prepared_sources,
        )
    )

    pytest_runner_pex_get = create_venv_pex(
        **implicitly(
            PexRequest(
                output_filename="pytest_runner.pex",
                interpreter_constraints=interpreter_constraints,
                main=pytest.main,
                internal_only=True,
                pex_path=[pytest_pex, requirements_pex, local_dists.pex, *request.additional_pexes],
            )
        )
    )
    config_files_get = find_config_file(pytest.config_request(field_set_source_files.snapshot.dirs))
    pytest_runner_pex, config_files = await concurrently(pytest_runner_pex_get, config_files_get)

    # The coverage and pytest config may live in the same config file (e.g., setup.cfg, tox.ini
    # or pyproject.toml), and we may have rewritten those files to augment the coverage config,
    # in which case we must ensure that the original and rewritten files don't collide.
    pytest_config_digest = config_files.snapshot.digest
    if coverage_config.path in config_files.snapshot.files:
        subset_paths = list(config_files.snapshot.files)
        # Remove the original file, and rely on the rewritten file, which contains all the
        # pytest-related config unchanged.
        subset_paths.remove(coverage_config.path)
        pytest_config_digest = await digest_subset_to_digest(
            DigestSubset(pytest_config_digest, PathGlobs(subset_paths))
        )

    input_digest = await merge_digests(
        MergeDigests(
            (
                coverage_config.digest,
                local_dists.remaining_sources.source_files.snapshot.digest,
                pytest_config_digest,
                extra_output_directory_digest,
                *(plugin_setup.digest for plugin_setup in plugin_setups),
            )
        )
    )

    # Don't forget to keep the "Customize Pytest command line options per target" section in
    # docs/markdown/Python/python-goals/python-test-goal.md up to date when changing
    # which flags are added to `pytest_args`.
    pytest_args = [
        # Always include colors and strip them out for display below (if required), for better
        # cache hit rates.
        "--color=yes"
    ]
    output_files = []

    results_file_name = None
    if not request.is_debug:
        results_file_prefix = request.field_sets[0].address.path_safe_spec
        if len(request.field_sets) > 1:
            results_file_prefix = (
                f"batch-of-{results_file_prefix}+{len(request.field_sets) - 1}-files"
            )
        results_file_name = f"{results_file_prefix}.xml"
        pytest_args.extend(
            (f"--junit-xml={results_file_name}", "-o", f"junit_family={pytest.junit_family}")
        )
        output_files.append(results_file_name)

    if test_subsystem.use_coverage and not request.is_debug:
        await validate_pytest_cov_included(pytest)
        output_files.append(".coverage")

        if coverage_subsystem.filter:
            cov_args = [f"--cov={morf}" for morf in coverage_subsystem.filter]
        else:
            # N.B.: Passing `--cov=` or `--cov=.` to communicate "record coverage for all sources"
            # fails in certain contexts as detailed in:
            #   https://github.com/pantsbuild/pants/issues/12390
            # Instead we focus coverage on just the directories containing python source files
            # materialized to the Process chroot.
            cov_args = [f"--cov={source_root}" for source_root in prepared_sources.source_roots]

        pytest_args.extend(
            (
                "--cov-report=",  # Turn off output.
                f"--cov-config={coverage_config.path}",
                *cov_args,
            )
        )

    extra_sys_path = OrderedSet(
        (
            *prepared_sources.source_roots,
            *(entry for plugin_setup in plugin_setups for entry in plugin_setup.extra_sys_path),
        )
    )
    extra_env = {
        "PEX_EXTRA_SYS_PATH": ":".join(extra_sys_path),
        **request.extra_env,
        **test_extra_env.env,
        # NOTE: field_set_extra_env intentionally after `test_extra_env` to allow overriding within
        # `python_tests`.
        **field_set_extra_env,
    }

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = (
        ProcessCacheScope.PER_SESSION if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    )

    xdist_concurrency = 0
    if pytest.xdist_enabled and not request.is_debug:
        concurrency = request.metadata.xdist_concurrency
        if concurrency is None:
            contents = await get_digest_contents(field_set_source_files.snapshot.digest)
            concurrency = _count_pytest_tests(contents)
        xdist_concurrency = concurrency

    timeout_seconds: int | None = None
    for field_set in request.field_sets:
        timeout = field_set.timeout.calculate_from_global_options(test_subsystem, pytest)
        if timeout:
            if timeout_seconds:
                timeout_seconds += timeout
            else:
                timeout_seconds = timeout

    run_description = request.field_sets[0].address.spec
    if len(request.field_sets) > 1:
        run_description = (
            f"batch of {run_description} and {len(request.field_sets) - 1} other files"
        )
    process = await setup_venv_pex_process(
        VenvPexProcess(
            pytest_runner_pex,
            argv=(
                *request.prepend_argv,
                *pytest.args,
                *(("-c", pytest.config) if pytest.config else ()),
                *(("-n", "{pants_concurrency}") if xdist_concurrency else ()),
                # N.B.: Now that we're using command-line options instead of the PYTEST_ADDOPTS
                # environment variable, it's critical that `pytest_args` comes after `pytest.args`.
                *pytest_args,
                *field_set_source_files.files,
            ),
            extra_env=extra_env,
            input_digest=input_digest,
            output_directories=(_EXTRA_OUTPUT_DIR,),
            output_files=output_files,
            timeout_seconds=timeout_seconds,
            execution_slot_variable=pytest.execution_slot_var,
            concurrency_available=xdist_concurrency,
            description=f"Run Pytest for {run_description}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
        **implicitly(),
    )
    return TestSetup(process, results_file_name=results_file_name)


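# NOTE (editor's illustration, not part of the original module): a sketch of how the xdist
# handling in the rule above is typically driven. `[pytest].xdist_enabled` and the
# `xdist_concurrency` field are the knobs read by this rule; the target name and the value 4
# are arbitrary.
#
#     # pants.toml
#     [pytest]
#     xdist_enabled = true
#
#     # BUILD
#     python_tests(
#         name="tests",
#         # Cap the number of pytest-xdist workers for this target. When left unset, the rule
#         # above estimates concurrency by counting `def test_` functions in the sources.
#         xdist_concurrency=4,
#     )

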
class PyTestRequest(TestRequest):
    tool_subsystem = PyTest  # type: ignore[assignment]
    field_set_type = PythonTestFieldSet
    partitioner_type = PartitionerType.CUSTOM
    supports_debug = True
    supports_debug_adapter = True


@rule(desc="Partition Pytest", level=LogLevel.DEBUG)
async def partition_python_tests(
    request: PyTestRequest.PartitionRequest[PythonTestFieldSet],
    python_setup: PythonSetup,
) -> Partitions[PythonTestFieldSet, TestMetadata]:
    partitions = []
    compatible_tests = defaultdict(list)

    for field_set in request.field_sets:
        metadata = TestMetadata(
            interpreter_constraints=InterpreterConstraints.create_from_field_sets(
                [field_set], python_setup
            ),
            extra_env_vars=field_set.extra_env_vars.sorted(),
            xdist_concurrency=field_set.xdist_concurrency.value,
            resolve=field_set.resolve.normalized_value(python_setup),
            environment=field_set.environment.value,
            compatability_tag=field_set.batch_compatibility_tag.value,
        )

        if not metadata.compatability_tag:
            # Tests without a compatibility tag are assumed to be incompatible with all others.
            partitions.append(Partition((field_set,), metadata))
        else:
            # Group tests by their common metadata.
            compatible_tests[metadata].append(field_set)

    for metadata, field_sets in compatible_tests.items():
        partitions.append(Partition(tuple(field_sets), metadata))

    return Partitions(partitions)


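# NOTE (editor's illustration, not part of the original module): a BUILD sketch of how targets
# opt into batching via the partitioning rule above. The tag value "default" is arbitrary;
# targets sharing the same tag (plus identical interpreter constraints, extra env vars, xdist
# concurrency, resolve, and environment) land in the same partition and run as one batch.
#
#     python_tests(
#         name="tests",
#         batch_compatibility_tag="default",
#     )

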
521
@rule(desc="Run Pytest", level=LogLevel.DEBUG)
×
UNCOV
522
async def run_python_tests(
×
523
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
524
    test_subsystem: TestSubsystem,
525
    global_options: GlobalOptions,
526
) -> TestResult:
527
    setup = await setup_pytest_for_target(
×
528
        TestSetupRequest(batch.elements, batch.partition_metadata, is_debug=False), **implicitly()
529
    )
530

531
    results = await execute_process_with_retry(
×
532
        ProcessWithRetries(setup.process, test_subsystem.attempts_default)
533
    )
534
    last_result = results.last
×
535

536
    def warning_description() -> str:
×
537
        description = batch.elements[0].address.spec
×
538
        if len(batch.elements) > 1:
×
539
            description = (
×
540
                f"batch containing {description} and {len(batch.elements) - 1} other files"
541
            )
542
        if batch.partition_metadata.description:
×
543
            description = f"{description} ({batch.partition_metadata.description})"
×
544
        return description
×
545

546
    coverage_data = None
×
547
    if test_subsystem.use_coverage:
×
548
        coverage_snapshot = await digest_to_snapshot(
×
549
            **implicitly(DigestSubset(last_result.output_digest, PathGlobs([".coverage"])))
550
        )
551
        if coverage_snapshot.files == (".coverage",):
×
552
            coverage_data = PytestCoverageData(
×
553
                tuple(field_set.address for field_set in batch.elements), coverage_snapshot.digest
554
            )
555
        else:
556
            logger.warning(f"Failed to generate coverage data for {warning_description()}.")
×
557

558
    xml_results_snapshot = None
×
559
    if setup.results_file_name:
×
560
        xml_results_snapshot = await digest_to_snapshot(
×
561
            **implicitly(
562
                DigestSubset(last_result.output_digest, PathGlobs([setup.results_file_name]))
563
            )
564
        )
565
        if xml_results_snapshot.files != (setup.results_file_name,):
×
566
            logger.warning(f"Failed to generate JUnit XML data for {warning_description()}.")
×
567
    extra_output_snapshot = await digest_to_snapshot(
×
568
        **implicitly(
569
            DigestSubset(last_result.output_digest, PathGlobs([f"{_EXTRA_OUTPUT_DIR}/**"]))
570
        )
571
    )
572
    extra_output_snapshot = await digest_to_snapshot(
×
573
        **implicitly(RemovePrefix(extra_output_snapshot.digest, _EXTRA_OUTPUT_DIR))
574
    )
575

576
    return TestResult.from_batched_fallible_process_result(
×
577
        results.results,
578
        batch=batch,
579
        output_setting=test_subsystem.output,
580
        coverage_data=coverage_data,
581
        xml_results=xml_results_snapshot,
582
        extra_output=extra_output_snapshot,
583
        output_simplifier=global_options.output_simplifier(),
584
    )
585

586

UNCOV
587
@rule(desc="Set up Pytest to run interactively", level=LogLevel.DEBUG)
×
UNCOV
588
async def debug_python_test(
×
589
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
590
) -> TestDebugRequest:
591
    setup = await setup_pytest_for_target(
×
592
        TestSetupRequest(batch.elements, batch.partition_metadata, is_debug=True), **implicitly()
593
    )
594
    return TestDebugRequest(
×
595
        InteractiveProcess.from_process(
596
            setup.process, forward_signals_to_process=False, restartable=True
597
        )
598
    )
599

600

UNCOV
601
@rule(desc="Set up debugpy to run an interactive Pytest session", level=LogLevel.DEBUG)
×
UNCOV
602
async def debugpy_python_test(
×
603
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
604
    debugpy: DebugPy,
605
    debug_adapter: DebugAdapterSubsystem,
606
    python_setup: PythonSetup,
607
) -> TestDebugAdapterRequest:
608
    debugpy_pex = await create_pex(
×
609
        debugpy.to_pex_request(
610
            interpreter_constraints=InterpreterConstraints.create_from_field_sets(
611
                batch.elements, python_setup
612
            )
613
        )
614
    )
615

616
    setup = await setup_pytest_for_target(
×
617
        TestSetupRequest(
618
            batch.elements,
619
            batch.partition_metadata,
620
            is_debug=True,
621
            prepend_argv=debugpy.get_args(debug_adapter),
622
            extra_env=FrozenDict(PEX_MODULE="debugpy"),
623
            additional_pexes=(debugpy_pex,),
624
        ),
625
        **implicitly(),
626
    )
627
    return TestDebugAdapterRequest(
×
628
        InteractiveProcess.from_process(
629
            setup.process, forward_signals_to_process=False, restartable=True
630
        )
631
    )
632

633

634
# -----------------------------------------------------------------------------------------
635
# `runtime_package_dependencies` plugin
636
# -----------------------------------------------------------------------------------------
637

638

UNCOV
639
@dataclass(frozen=True)
×
UNCOV
640
class RuntimePackagesPluginRequest(PytestPluginSetupRequest):
×
UNCOV
641
    @classmethod
×
UNCOV
642
    def is_applicable(cls, target: Target) -> bool:
×
643
        return bool(target.get(RuntimePackageDependenciesField).value)
×
644

645

UNCOV
646
@rule
×
UNCOV
647
async def setup_runtime_packages(request: RuntimePackagesPluginRequest) -> PytestPluginSetup:
×
648
    built_packages = await build_runtime_package_dependencies(
×
649
        BuildPackageDependenciesRequest(request.target.get(RuntimePackageDependenciesField))
650
    )
651
    digest = await merge_digests(MergeDigests(pkg.digest for pkg in built_packages))
×
652
    return PytestPluginSetup(digest)
×
653

654

UNCOV
655
def rules():
×
UNCOV
656
    return [
×
657
        *collect_rules(),
658
        *pytest.rules(),
659
        UnionRule(PytestPluginSetupRequest, RuntimePackagesPluginRequest),
660
        *PyTestRequest.rules(),
661
    ]