• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

pantsbuild / pants / 20438429929

22 Dec 2025 04:55PM UTC coverage: 80.287% (+0.003%) from 80.284%
20438429929

Pull #22934

github

web-flow
Merge b49c09e21 into 06f105be8
Pull Request #22934: feat(go): add multi-module support to golangci-lint plugin and upgrade to v2

37 of 62 new or added lines in 3 files covered. (59.68%)

183 existing lines in 9 files now uncovered.

78528 of 97809 relevant lines covered (80.29%)

3.36 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

52.7
/src/python/pants/backend/python/goals/pytest_runner.py
1
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
2
# Licensed under the Apache License, Version 2.0 (see LICENSE).
3

4
from __future__ import annotations
9✔
5

6
import logging
9✔
7
import re
9✔
8
from abc import ABC, abstractmethod
9✔
9
from collections import defaultdict
9✔
10
from dataclasses import dataclass
9✔
11

12
from packaging.utils import canonicalize_name as canonicalize_project_name
9✔
13

14
from pants.backend.python.goals.coverage_py import (
9✔
15
    CoverageConfig,
16
    CoverageSubsystem,
17
    PytestCoverageData,
18
)
19
from pants.backend.python.subsystems import pytest
9✔
20
from pants.backend.python.subsystems.debugpy import DebugPy
9✔
21
from pants.backend.python.subsystems.pytest import PyTest, PythonTestFieldSet
9✔
22
from pants.backend.python.subsystems.python_tool_base import get_lockfile_metadata
9✔
23
from pants.backend.python.subsystems.setup import PythonSetup
9✔
24
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
9✔
25
from pants.backend.python.util_rules.local_dists import LocalDistsPexRequest, build_local_dists
9✔
26
from pants.backend.python.util_rules.lockfile_metadata import (
9✔
27
    PythonLockfileMetadataV2,
28
    PythonLockfileMetadataV3,
29
)
30
from pants.backend.python.util_rules.pex import (
9✔
31
    Pex,
32
    PexRequest,
33
    VenvPexProcess,
34
    create_pex,
35
    create_venv_pex,
36
    get_req_strings,
37
    setup_venv_pex_process,
38
)
39
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
9✔
40
from pants.backend.python.util_rules.pex_requirements import PexRequirements
9✔
41
from pants.backend.python.util_rules.python_sources import (
9✔
42
    PythonSourceFilesRequest,
43
    prepare_python_sources,
44
)
45
from pants.core.goals.test import (
9✔
46
    BuildPackageDependenciesRequest,
47
    RuntimePackageDependenciesField,
48
    TestDebugAdapterRequest,
49
    TestDebugRequest,
50
    TestExtraEnv,
51
    TestRequest,
52
    TestResult,
53
    TestSubsystem,
54
    build_runtime_package_dependencies,
55
)
56
from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
9✔
57
from pants.core.util_rules.config_files import find_config_file
9✔
58
from pants.core.util_rules.env_vars import environment_vars_subset
9✔
59
from pants.core.util_rules.partitions import Partition, PartitionerType, Partitions
9✔
60
from pants.core.util_rules.source_files import SourceFilesRequest, determine_source_files
9✔
61
from pants.engine.addresses import Address
9✔
62
from pants.engine.collection import Collection
9✔
63
from pants.engine.env_vars import EnvironmentVarsRequest
9✔
64
from pants.engine.environment import EnvironmentName
9✔
65
from pants.engine.fs import (
9✔
66
    EMPTY_DIGEST,
67
    CreateDigest,
68
    Digest,
69
    DigestContents,
70
    DigestSubset,
71
    Directory,
72
    MergeDigests,
73
    PathGlobs,
74
    RemovePrefix,
75
)
76
from pants.engine.internals.graph import resolve_target
9✔
77
from pants.engine.internals.graph import transitive_targets as transitive_targets_get
9✔
78
from pants.engine.intrinsics import (
9✔
79
    create_digest,
80
    digest_subset_to_digest,
81
    digest_to_snapshot,
82
    execute_process_with_retry,
83
    get_digest_contents,
84
    merge_digests,
85
)
86
from pants.engine.process import InteractiveProcess, Process, ProcessCacheScope, ProcessWithRetries
9✔
87
from pants.engine.rules import collect_rules, concurrently, implicitly, rule
9✔
88
from pants.engine.target import Target, TransitiveTargetsRequest, WrappedTargetRequest
9✔
89
from pants.engine.unions import UnionMembership, UnionRule, union
9✔
90
from pants.option.global_options import GlobalOptions
9✔
91
from pants.util.docutil import doc_url
9✔
92
from pants.util.frozendict import FrozenDict
9✔
93
from pants.util.logging import LogLevel
9✔
94
from pants.util.ordered_set import OrderedSet
9✔
95
from pants.util.pip_requirement import PipRequirement
9✔
96
from pants.util.strutil import softwrap
9✔
97

98
# Use the module's own logger (not the root logger) so records carry this
# module's name and can be filtered/configured per module.
logger = logging.getLogger(__name__)
99

100

101
# -----------------------------------------------------------------------------------------
102
# Plugin hook
103
# -----------------------------------------------------------------------------------------
104

105

106
@dataclass(frozen=True)
class PytestPluginSetup:
    """The result of custom set up logic before Pytest runs.

    Please reach out if you would like certain functionality, such as allowing your plugin to set
    environment variables.
    """

    # Extra files to materialize into the test chroot.
    digest: Digest = EMPTY_DIGEST
    # Extra entries to add to the `PEX_EXTRA_SYS_PATH` used by the test run.
    extra_sys_path: tuple[str, ...] = ()
116

117

118
@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class PytestPluginSetupRequest(ABC):
    """A request to set up the test environment before Pytest runs, e.g. to set up databases.

    To use, subclass PytestPluginSetupRequest, register the rule
    `UnionRule(PytestPluginSetupRequest, MyCustomPytestPluginSetupRequest)`, and add a rule that
    takes your subclass as a parameter and returns `PytestPluginSetup`.
    """

    # The test target this setup request applies to.
    target: Target

    @classmethod
    @abstractmethod
    def is_applicable(cls, target: Target) -> bool:
        """Whether the setup implementation should be used for this target or not."""
134

135

136
@rule(polymorphic=True)
async def get_pytest_plugin_setup(req: PytestPluginSetupRequest) -> PytestPluginSetup:
    # Polymorphic rule stub: the engine dispatches to the union member's rule,
    # so this body is never executed directly.
    raise NotImplementedError()
139

140

141
class AllPytestPluginSetups(Collection[PytestPluginSetup]):
    """All `PytestPluginSetup` results gathered from registered plugin setup rules."""

    pass
143

144

145
# TODO: Why is this necessary? We should be able to use `PythonTestFieldSet` as the rule param.
@dataclass(frozen=True)
class AllPytestPluginSetupsRequest:
    # Addresses of the test targets in the batch being set up.
    addresses: tuple[Address, ...]
149

150

151
@rule
async def run_all_setup_plugins(
    request: AllPytestPluginSetupsRequest, union_membership: UnionMembership
) -> AllPytestPluginSetups:
    """Run every applicable `PytestPluginSetupRequest` union member for the given addresses."""
    wrapped_tgts = await concurrently(
        resolve_target(
            WrappedTargetRequest(address, description_of_origin="<infallible>"), **implicitly()
        )
        for address in request.addresses
    )
    # Instantiate each registered request type for every target it declares itself
    # applicable to (cross product of request types x targets).
    setup_requests = [
        request_type(wrapped_tgt.target)  # type: ignore[abstract]
        for request_type in union_membership.get(PytestPluginSetupRequest)
        for wrapped_tgt in wrapped_tgts
        if request_type.is_applicable(wrapped_tgt.target)
    ]
    setups = await concurrently(
        get_pytest_plugin_setup(**implicitly({request: PytestPluginSetupRequest}))
        for request in setup_requests
    )
    return AllPytestPluginSetups(setups)
172

173

174
# -----------------------------------------------------------------------------------------
175
# Core logic
176
# -----------------------------------------------------------------------------------------
177

178

179
# If a user wants extra pytest output (e.g., plugin output) to show up in dist/
# they must ensure that output goes under this directory. E.g.,
# ./pants test <target> -- --html=extra-output/report.html
_EXTRA_OUTPUT_DIR = "extra-output"
183

184

185
@dataclass(frozen=True)
class TestMetadata:
    """Parameters that must be constant for all test targets in a `pytest` batch."""

    interpreter_constraints: InterpreterConstraints
    extra_env_vars: tuple[str, ...]
    xdist_concurrency: int | None
    resolve: str
    environment: str
    # Targets sharing this tag (and all other metadata) may be batched together.
    compatability_tag: str | None = None

    # Prevent this class from being detected by pytest as a test class.
    __test__ = False

    @property
    def description(self) -> str | None:
        """Human-readable batch description, or None for untagged (solo) batches."""
        # TODO: Put more info here.
        return self.compatability_tag or None
206

207

208
@dataclass(frozen=True)
class TestSetupRequest:
    """A request to build a pytest `Process` for a batch of compatible test targets."""

    # The batch of test field sets to run in a single pytest invocation.
    field_sets: tuple[PythonTestFieldSet, ...]
    # Batch-constant parameters (interpreter constraints, resolve, env vars, ...).
    metadata: TestMetadata
    # Whether the process will run interactively (`test --debug`).
    is_debug: bool
    extra_env: FrozenDict[str, str] = FrozenDict()
    # Arguments inserted before pytest's own args (e.g. debugpy's launcher args).
    prepend_argv: tuple[str, ...] = ()
    # Extra PEXes to place on the runner's PEX path (e.g. debugpy).
    additional_pexes: tuple[Pex, ...] = ()
216

217

218
@dataclass(frozen=True)
class TestSetup:
    """A fully configured pytest `Process`, ready to execute."""

    process: Process
    # Name of the JUnit XML results file, or None when running interactively.
    results_file_name: str | None

    # Prevent this class from being detected by pytest as a test class.
    __test__ = False
225

226

227
_TEST_PATTERN = re.compile(b"def\\s+test_")
9✔
228

229

230
def _count_pytest_tests(contents: DigestContents) -> int:
9✔
231
    return sum(len(_TEST_PATTERN.findall(file.content)) for file in contents)
1✔
232

233

234
async def validate_pytest_cov_included(_pytest: PyTest):
    """Raise `ValueError` if the pytest resolve does not include `pytest-cov`.

    `pytest-cov` is required to collect coverage data when `[test].use_coverage` is set.
    """
    if _pytest.requirements:
        # We'll only be using this subset of the lockfile.
        req_strings = (await get_req_strings(PexRequirements(_pytest.requirements))).req_strings
        requirements = {PipRequirement.parse(req_string) for req_string in req_strings}
    else:
        # We'll be using the entire lockfile.
        lockfile_metadata = await get_lockfile_metadata(_pytest)
        if not isinstance(lockfile_metadata, (PythonLockfileMetadataV2, PythonLockfileMetadataV3)):
            # Older metadata formats don't record individual requirements; nothing to validate.
            return
        requirements = lockfile_metadata.requirements
    if not any(canonicalize_project_name(req.name) == "pytest-cov" for req in requirements):
        raise ValueError(
            softwrap(
                f"""\
                You set `[test].use_coverage`, but the custom resolve
                `{_pytest.install_from_resolve}` used to install pytest is missing
                `pytest-cov`, which is needed to collect coverage data.

                See {doc_url("docs/python/goals/test#pytest-version-and-plugins")} for details
                on how to set up a custom resolve for use by pytest.
                """
            )
        )
258

259

260
@rule(level=LogLevel.DEBUG)
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
) -> TestSetup:
    """Assemble the pytest `Process` for a batch of test targets.

    Builds the runner PEX (pytest + requirements + local dists), gathers sources and
    config files, computes pytest CLI args (JUnit XML, coverage, xdist), env vars,
    timeouts, and caching behavior.
    """
    addresses = tuple(field_set.address for field_set in request.field_sets)

    transitive_targets, plugin_setups = await concurrently(
        transitive_targets_get(TransitiveTargetsRequest(addresses), **implicitly()),
        run_all_setup_plugins(AllPytestPluginSetupsRequest(addresses), **implicitly()),
    )
    all_targets = transitive_targets.closure

    interpreter_constraints = request.metadata.interpreter_constraints

    requirements_pex_get = create_pex(**implicitly(RequirementsPexRequest(addresses)))
    pytest_pex_get = create_pex(
        pytest.to_pex_request(interpreter_constraints=interpreter_constraints)
    )

    # Ensure that the empty extra output dir exists.
    extra_output_directory_digest_get = create_digest(CreateDigest([Directory(_EXTRA_OUTPUT_DIR)]))

    prepared_sources_get = prepare_python_sources(
        PythonSourceFilesRequest(all_targets, include_files=True), **implicitly()
    )

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_get = determine_source_files(
        SourceFilesRequest([field_set.source for field_set in request.field_sets])
    )

    field_set_extra_env_get = environment_vars_subset(
        EnvironmentVarsRequest(request.metadata.extra_env_vars), **implicitly()
    )

    # NOTE: tuple order here must match the order of the awaited gets below.
    (
        pytest_pex,
        requirements_pex,
        prepared_sources,
        field_set_source_files,
        field_set_extra_env,
        extra_output_directory_digest,
    ) = await concurrently(
        pytest_pex_get,
        requirements_pex_get,
        prepared_sources_get,
        field_set_source_files_get,
        field_set_extra_env_get,
        extra_output_directory_digest_get,
    )

    local_dists = await build_local_dists(
        LocalDistsPexRequest(
            addresses,
            interpreter_constraints=interpreter_constraints,
            sources=prepared_sources,
        )
    )

    pytest_runner_pex_get = create_venv_pex(
        **implicitly(
            PexRequest(
                output_filename="pytest_runner.pex",
                interpreter_constraints=interpreter_constraints,
                main=pytest.main,
                internal_only=True,
                pex_path=[pytest_pex, requirements_pex, local_dists.pex, *request.additional_pexes],
            )
        )
    )
    config_files_get = find_config_file(pytest.config_request(field_set_source_files.snapshot.dirs))
    pytest_runner_pex, config_files = await concurrently(pytest_runner_pex_get, config_files_get)

    # The coverage and pytest config may live in the same config file (e.g., setup.cfg, tox.ini
    # or pyproject.toml), and we may have rewritten those files to augment the coverage config,
    # in which case we must ensure that the original and rewritten files don't collide.
    pytest_config_digest = config_files.snapshot.digest
    if coverage_config.path in config_files.snapshot.files:
        subset_paths = list(config_files.snapshot.files)
        # Remove the original file, and rely on the rewritten file, which contains all the
        # pytest-related config unchanged.
        subset_paths.remove(coverage_config.path)
        pytest_config_digest = await digest_subset_to_digest(
            DigestSubset(pytest_config_digest, PathGlobs(subset_paths))
        )

    input_digest = await merge_digests(
        MergeDigests(
            (
                coverage_config.digest,
                local_dists.remaining_sources.source_files.snapshot.digest,
                pytest_config_digest,
                extra_output_directory_digest,
                *(plugin_setup.digest for plugin_setup in plugin_setups),
            )
        )
    )

    # Don't forget to keep "Customize Pytest command line options per target" section in
    # docs/markdown/Python/python-goals/python-test-goal.md up to date when changing
    # which flags are added to `pytest_args`.
    pytest_args = [
        # Always include colors and strip them out for display below (if required), for better cache
        # hit rates
        "--color=yes"
    ]
    output_files = []

    results_file_name = None
    if not request.is_debug:
        results_file_prefix = request.field_sets[0].address.path_safe_spec
        if len(request.field_sets) > 1:
            results_file_prefix = (
                f"batch-of-{results_file_prefix}+{len(request.field_sets) - 1}-files"
            )
        results_file_name = f"{results_file_prefix}.xml"
        pytest_args.extend(
            (f"--junit-xml={results_file_name}", "-o", f"junit_family={pytest.junit_family}")
        )
        output_files.append(results_file_name)

    if test_subsystem.use_coverage and not request.is_debug:
        await validate_pytest_cov_included(pytest)
        output_files.append(".coverage")

        if coverage_subsystem.filter:
            cov_args = [f"--cov={morf}" for morf in coverage_subsystem.filter]
        else:
            # N.B.: Passing `--cov=` or `--cov=.` to communicate "record coverage for all sources"
            # fails in certain contexts as detailed in:
            #   https://github.com/pantsbuild/pants/issues/12390
            # Instead we focus coverage on just the directories containing python source files
            # materialized to the Process chroot.
            cov_args = [f"--cov={source_root}" for source_root in prepared_sources.source_roots]

        pytest_args.extend(
            (
                "--cov-report=",  # Turn off output.
                f"--cov-config={coverage_config.path}",
                *cov_args,
            )
        )

    extra_sys_path = OrderedSet(
        (
            *prepared_sources.source_roots,
            *(entry for plugin_setup in plugin_setups for entry in plugin_setup.extra_sys_path),
        )
    )
    extra_env = {
        "PEX_EXTRA_SYS_PATH": ":".join(extra_sys_path),
        **request.extra_env,
        **test_extra_env.env,
        # NOTE: field_set_extra_env intentionally after `test_extra_env` to allow overriding within
        # `python_tests`.
        **field_set_extra_env,
    }

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = (
        ProcessCacheScope.PER_SESSION if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    )

    xdist_concurrency = 0
    if pytest.xdist_enabled and not request.is_debug:
        concurrency = request.metadata.xdist_concurrency
        if concurrency is None:
            # No explicit concurrency configured: estimate it by counting `def test_`
            # occurrences in the batch's test files.
            contents = await get_digest_contents(field_set_source_files.snapshot.digest)
            concurrency = _count_pytest_tests(contents)
        xdist_concurrency = concurrency

    # Sum per-target timeouts so a multi-target batch gets the combined budget.
    timeout_seconds: int | None = None
    for field_set in request.field_sets:
        timeout = field_set.timeout.calculate_from_global_options(test_subsystem, pytest)
        if timeout:
            if timeout_seconds:
                timeout_seconds += timeout
            else:
                timeout_seconds = timeout

    run_description = request.field_sets[0].address.spec
    if len(request.field_sets) > 1:
        run_description = (
            f"batch of {run_description} and {len(request.field_sets) - 1} other files"
        )
    process = await setup_venv_pex_process(
        VenvPexProcess(
            pytest_runner_pex,
            argv=(
                *request.prepend_argv,
                *pytest.args,
                *(("-c", pytest.config) if pytest.config else ()),
                *(("-n", "{pants_concurrency}") if xdist_concurrency else ()),
                # N.B.: Now that we're using command-line options instead of the PYTEST_ADDOPTS
                # environment variable, it's critical that `pytest_args` comes after `pytest.args`.
                *pytest_args,
                *field_set_source_files.files,
            ),
            extra_env=extra_env,
            input_digest=input_digest,
            output_directories=(_EXTRA_OUTPUT_DIR,),
            output_files=output_files,
            timeout_seconds=timeout_seconds,
            execution_slot_variable=pytest.execution_slot_var,
            concurrency_available=xdist_concurrency,
            description=f"Run Pytest for {run_description}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
        **implicitly(),
    )
    return TestSetup(process, results_file_name=results_file_name)
478

479

480
class PyTestRequest(TestRequest):
    """`TestRequest` implementation that wires pytest into the `test` goal."""

    tool_subsystem = PyTest  # type: ignore[assignment]
    field_set_type = PythonTestFieldSet
    # Batching is decided by `partition_python_tests`, not a default partitioner.
    partitioner_type = PartitionerType.CUSTOM
    supports_debug = True
    supports_debug_adapter = True
486

487

488
@rule(desc="Partition Pytest", level=LogLevel.DEBUG)
async def partition_python_tests(
    request: PyTestRequest.PartitionRequest[PythonTestFieldSet],
    python_setup: PythonSetup,
) -> Partitions[PythonTestFieldSet, TestMetadata]:
    """Group test field sets into batches that share identical `TestMetadata`."""
    solo_partitions = []
    batched = defaultdict(list)

    for fs in request.field_sets:
        metadata = TestMetadata(
            interpreter_constraints=InterpreterConstraints.create_from_field_sets(
                [fs], python_setup
            ),
            extra_env_vars=fs.extra_env_vars.sorted(),
            xdist_concurrency=fs.xdist_concurrency.value,
            resolve=fs.resolve.normalized_value(python_setup),
            environment=fs.environment.value,
            compatability_tag=fs.batch_compatibility_tag.value,
        )

        if metadata.compatability_tag:
            # Tests sharing a compatibility tag (and identical metadata) may be batched.
            batched[metadata].append(fs)
        else:
            # Tests without a compatibility tag are assumed to be incompatible with all
            # others, so each gets its own single-element partition.
            solo_partitions.append(Partition((fs,), metadata))

    grouped_partitions = [
        Partition(tuple(members), metadata) for metadata, members in batched.items()
    ]
    return Partitions(solo_partitions + grouped_partitions)
519

520

521
@rule(desc="Run Pytest", level=LogLevel.DEBUG)
async def run_python_tests(
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    global_options: GlobalOptions,
) -> TestResult:
    """Execute pytest for a batch of tests and assemble a `TestResult`.

    Runs the process (with retries per `[test].attempts_default`), then extracts
    coverage data, the JUnit XML file, and any extra output from the final attempt.
    """
    setup = await setup_pytest_for_target(
        TestSetupRequest(batch.elements, batch.partition_metadata, is_debug=False), **implicitly()
    )

    results = await execute_process_with_retry(
        ProcessWithRetries(setup.process, test_subsystem.attempts_default)
    )
    last_result = results.last

    def warning_description() -> str:
        # Human-readable identifier for this batch, used in warning messages.
        description = batch.elements[0].address.spec
        if len(batch.elements) > 1:
            description = (
                f"batch containing {description} and {len(batch.elements) - 1} other files"
            )
        if batch.partition_metadata.description:
            description = f"{description} ({batch.partition_metadata.description})"
        return description

    coverage_data = None
    if test_subsystem.use_coverage:
        coverage_snapshot = await digest_to_snapshot(
            **implicitly(DigestSubset(last_result.output_digest, PathGlobs([".coverage"])))
        )
        if coverage_snapshot.files == (".coverage",):
            coverage_data = PytestCoverageData(
                tuple(field_set.address for field_set in batch.elements), coverage_snapshot.digest
            )
        else:
            logger.warning(f"Failed to generate coverage data for {warning_description()}.")

    xml_results_snapshot = None
    if setup.results_file_name:
        xml_results_snapshot = await digest_to_snapshot(
            **implicitly(
                DigestSubset(last_result.output_digest, PathGlobs([setup.results_file_name]))
            )
        )
        if xml_results_snapshot.files != (setup.results_file_name,):
            logger.warning(f"Failed to generate JUnit XML data for {warning_description()}.")
    # Capture everything under the extra-output dir, then strip the prefix so the
    # files land directly in dist/.
    extra_output_snapshot = await digest_to_snapshot(
        **implicitly(
            DigestSubset(last_result.output_digest, PathGlobs([f"{_EXTRA_OUTPUT_DIR}/**"]))
        )
    )
    extra_output_snapshot = await digest_to_snapshot(
        **implicitly(RemovePrefix(extra_output_snapshot.digest, _EXTRA_OUTPUT_DIR))
    )

    # Pytest exits with code 5 when no tests were collected.
    if last_result.exit_code == 5 and pytest.allow_empty_test_collection:
        return TestResult.no_tests_found_in_batch(batch, test_subsystem.output)

    return TestResult.from_batched_fallible_process_result(
        results.results,
        batch=batch,
        output_setting=test_subsystem.output,
        coverage_data=coverage_data,
        xml_results=xml_results_snapshot,
        extra_output=extra_output_snapshot,
        output_simplifier=global_options.output_simplifier(),
    )
589

590

591
@rule(desc="Set up Pytest to run interactively", level=LogLevel.DEBUG)
async def debug_python_test(
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
) -> TestDebugRequest:
    """Build an interactive (foreground) pytest process for `test --debug`."""
    setup_request = TestSetupRequest(batch.elements, batch.partition_metadata, is_debug=True)
    setup = await setup_pytest_for_target(setup_request, **implicitly())
    interactive = InteractiveProcess.from_process(
        setup.process, forward_signals_to_process=False, restartable=True
    )
    return TestDebugRequest(interactive)
603

604

605
@rule(desc="Set up debugpy to run an interactive Pytest session", level=LogLevel.DEBUG)
async def debugpy_python_test(
    batch: PyTestRequest.Batch[PythonTestFieldSet, TestMetadata],
    debugpy: DebugPy,
    debug_adapter: DebugAdapterSubsystem,
    python_setup: PythonSetup,
) -> TestDebugAdapterRequest:
    """Build an interactive pytest process that launches under debugpy for DAP clients."""
    debugpy_pex = await create_pex(
        debugpy.to_pex_request(
            interpreter_constraints=InterpreterConstraints.create_from_field_sets(
                batch.elements, python_setup
            )
        )
    )

    setup = await setup_pytest_for_target(
        TestSetupRequest(
            batch.elements,
            batch.partition_metadata,
            is_debug=True,
            # Launch via debugpy, making its PEX available on the runner's PEX path.
            prepend_argv=debugpy.get_args(debug_adapter),
            extra_env=FrozenDict(PEX_MODULE="debugpy"),
            additional_pexes=(debugpy_pex,),
        ),
        **implicitly(),
    )
    return TestDebugAdapterRequest(
        InteractiveProcess.from_process(
            setup.process, forward_signals_to_process=False, restartable=True
        )
    )
636

637

638
# -----------------------------------------------------------------------------------------
639
# `runtime_package_dependencies` plugin
640
# -----------------------------------------------------------------------------------------
641

642

643
@dataclass(frozen=True)
class RuntimePackagesPluginRequest(PytestPluginSetupRequest):
    """Builds `runtime_package_dependencies` and makes them available to the test chroot."""

    @classmethod
    def is_applicable(cls, target: Target) -> bool:
        # Only applies when the target declares at least one runtime package dependency.
        return bool(target.get(RuntimePackageDependenciesField).value)
648

649

650
@rule
async def setup_runtime_packages(request: RuntimePackagesPluginRequest) -> PytestPluginSetup:
    """Build the target's declared runtime packages and merge them into one digest."""
    built_packages = await build_runtime_package_dependencies(
        BuildPackageDependenciesRequest(request.target.get(RuntimePackageDependenciesField))
    )
    digest = await merge_digests(MergeDigests(pkg.digest for pkg in built_packages))
    return PytestPluginSetup(digest)
657

658

659
def rules():
    """Return all rules needed to register the pytest runner with the `test` goal."""
    return [
        *collect_rules(),
        *pytest.rules(),
        UnionRule(PytestPluginSetupRequest, RuntimePackagesPluginRequest),
        *PyTestRequest.rules(),
    ]
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc