pantsbuild / pants / build 24056446661
06 Apr 2026 11:32PM UTC. Coverage: 92.904% (-0.004% from 92.908%)

Pull Request #23225: Add --test-show-all-batch-targets to expose all targets in batched pytest
Merge 4e66b9fc9 into 542ca048d (committed via the GitHub web-flow user)

10 of 17 new or added lines in 2 files covered (58.82%).
23 existing lines in 2 files now uncovered.
91545 of 98537 relevant lines covered (92.9%), at 4.04 hits per line.

Source file: /src/python/pants/core/goals/test_test.py (95.45% of lines covered)
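As a quick sanity check on the figures above, the percentages follow from the reported line counts (illustrative arithmetic only, not part of the report):

    91545 / 98537 = 0.92904..., i.e. 92.904% overall coverage
    10 / 17 = 0.5882..., i.e. 58.82% of new or added lines covered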
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

from abc import abstractmethod
from collections.abc import Iterable
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from textwrap import dedent
from typing import Any

import pytest
from _pytest.monkeypatch import MonkeyPatch

from pants.backend.python.goals import package_pex_binary
from pants.backend.python.target_types import PexBinary, PythonSourcesGeneratorTarget
from pants.backend.python.target_types_rules import rules as python_target_type_rules
from pants.backend.python.util_rules import pex_from_targets
from pants.core.environments.rules import ChosenLocalEnvironmentName
from pants.core.goals.test import (
    BuildPackageDependenciesRequest,
    BuiltPackageDependencies,
    ConsoleCoverageReport,
    CoverageData,
    CoverageDataCollection,
    CoverageReports,
    RuntimePackageDependenciesField,
    ShowOutput,
    Test,
    TestDebugRequest,
    TestFieldSet,
    TestRequest,
    TestResult,
    TestSubsystem,
    TestTimeoutField,
    _format_test_rerun_command,
    _format_test_summary,
    build_runtime_package_dependencies,
    run_tests,
)
from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
from pants.core.util_rules.distdir import DistDir
from pants.core.util_rules.partitions import Partition, Partitions
from pants.engine.addresses import Address
from pants.engine.console import Console
from pants.engine.environment import EnvironmentName
from pants.engine.fs import EMPTY_DIGEST, EMPTY_FILE_DIGEST, FileDigest, Snapshot, Workspace
from pants.engine.internals.session import RunId
from pants.engine.platform import Platform
from pants.engine.process import (
    InteractiveProcess,
    InteractiveProcessResult,
    ProcessExecutionEnvironment,
    ProcessResultMetadata,
)
from pants.engine.target import (
    BoolField,
    Field,
    MultipleSourcesField,
    Target,
    TargetRootsToFieldSets,
    TargetRootsToFieldSetsRequest,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.option.option_types import SkipOption
from pants.option.subsystem import Subsystem
from pants.testutil.option_util import create_goal_subsystem, create_subsystem
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule, mock_console, run_rule_with_mocks
from pants.util.logging import LogLevel


def make_process_result_metadata(
    source: str,
    *,
    environment_name: str | None = None,
    docker_image: str | None = None,
    remote_execution: bool = False,
    total_elapsed_ms: int = 999,
    source_run_id: int = 0,
) -> ProcessResultMetadata:
    return ProcessResultMetadata(
        total_elapsed_ms,
        ProcessExecutionEnvironment(
            environment_name=environment_name,
            # TODO: None of the following are currently consumed in these tests.
            platform=Platform.create_for_localhost().value,
            docker_image=docker_image,
            remote_execution=remote_execution,
            remote_execution_extra_platform_properties=[],
            execute_in_workspace=False,
            keep_sandboxes="never",
        ),
        source,
        source_run_id,
    )


def make_test_result(
    addresses: Iterable[Address],
    exit_code: None | int,
    stdout_bytes: bytes = b"",
    stdout_digest: FileDigest = EMPTY_FILE_DIGEST,
    stderr_bytes: bytes = b"",
    stderr_digest: FileDigest = EMPTY_FILE_DIGEST,
    coverage_data: CoverageData | None = None,
    output_setting: ShowOutput = ShowOutput.NONE,
    result_metadata: None | ProcessResultMetadata = None,
) -> TestResult:
    """Create a TestResult with default values for most fields."""
    return TestResult(
        addresses=tuple(addresses),
        exit_code=exit_code,
        stdout_bytes=stdout_bytes,
        stdout_digest=stdout_digest,
        stderr_bytes=stderr_bytes,
        stderr_digest=stderr_digest,
        coverage_data=coverage_data,
        output_setting=output_setting,
        result_metadata=result_metadata,
    )


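# As an illustration of how these two factories compose (a sketch, not part of
# the suite proper), a typical "ran and passed" result would look roughly like
#
#     make_test_result(
#         [Address("", target_name="demo")],
#         exit_code=0,
#         output_setting=ShowOutput.ALL,
#         result_metadata=make_process_result_metadata("ran"),
#     )
#
# which is essentially what MockTestRequest.test_result builds further below.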
class MockMultipleSourcesField(MultipleSourcesField):
    pass


class MockTestTimeoutField(TestTimeoutField):
    pass


class MockSkipTestsField(BoolField):
    alias = "skip_test"
    default = False


class MockRequiredField(Field):
    alias = "required"
    required = True


class MockTarget(Target):
    alias = "mock_target"
    core_fields = (MockMultipleSourcesField, MockSkipTestsField, MockRequiredField)


@dataclass(frozen=True)
class MockCoverageData(CoverageData):
    addresses: Iterable[Address]


class MockCoverageDataCollection(CoverageDataCollection):
    element_type = MockCoverageData


@dataclass(frozen=True)
class MockTestFieldSet(TestFieldSet):
    required_fields = (MultipleSourcesField, MockRequiredField)
    sources: MultipleSourcesField
    required: MockRequiredField

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        return tgt.get(MockSkipTestsField).value


class MockTestSubsystem(Subsystem):
    options_scope = "mock-test"
    help = "Not real"
    name = "Mock"
    skip = SkipOption("test")


class MockTestRequest(TestRequest):
    field_set_type = MockTestFieldSet
    tool_subsystem = MockTestSubsystem  # type: ignore[assignment]

    @staticmethod
    @abstractmethod
    def exit_code(_: Iterable[Address]) -> int:
        pass

    @staticmethod
    @abstractmethod
    def skipped(_: Iterable[Address]) -> bool:
        pass

    @classmethod
    def test_result(cls, field_sets: Iterable[MockTestFieldSet]) -> TestResult:
        addresses = tuple(field_set.address for field_set in field_sets)
        return make_test_result(
            addresses,
            exit_code=cls.exit_code(addresses),
            coverage_data=MockCoverageData(addresses),
            output_setting=ShowOutput.ALL,
            result_metadata=None if cls.skipped(addresses) else make_process_result_metadata("ran"),
        )


class SuccessfulRequest(MockTestRequest):
    @staticmethod
    def exit_code(_: Iterable[Address]) -> int:
        return 0

    @staticmethod
    def skipped(_: Iterable[Address]) -> bool:
        return False


class ConditionallySucceedsRequest(MockTestRequest):
    @staticmethod
    def exit_code(addresses: Iterable[Address]) -> int:
        if any(address.target_name == "bad" for address in addresses):
            return 27
        return 0

    @staticmethod
    def skipped(addresses: Iterable[Address]) -> bool:
        return any(address.target_name == "skipped" for address in addresses)


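# A note on the mock rule signatures below: as these mocks consume it, the
# `__implicitly` tuple's first element is a mapping from the request instance
# to the type that was requested, so `next(iter(__implicitly[0].items()))`
# recovers both the request and the requested output type. `mock_test_partition`
# then routes each batch back to the concrete MockTestRequest subclass that
# produced it (SuccessfulRequest or ConditionallySucceedsRequest) via
# `MockTestRequest.__subclasses__()`.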
def mock_partitioner(
    __implicitly: tuple,
) -> Partitions[MockTestFieldSet, Any]:
    request, typ = next(iter(__implicitly[0].items()))
    assert typ == TestRequest.PartitionRequest
    return Partitions(Partition((field_set,), None) for field_set in request.field_sets)


def mock_test_partition(__implicitly: tuple) -> TestResult:
    request, typ = next(iter(__implicitly[0].items()))
    assert typ == TestRequest.Batch
    request_subtype = {cls.Batch: cls for cls in MockTestRequest.__subclasses__()}[type(request)]
    return request_subtype.test_result(request.elements)


@pytest.fixture
def rule_runner() -> PythonRuleRunner:
    return PythonRuleRunner()


def make_target(address: Address | None = None, *, skip: bool = False) -> Target:
    if address is None:
        address = Address("", target_name="tests")
    return MockTarget({MockSkipTestsField.alias: skip, MockRequiredField.alias: "present"}, address)


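# `run_test_rule` drives the real `run_tests` goal rule with every rule it
# would normally call replaced by one of the mocks above (or by the inline
# mocks defined in its body); each key in `mock_calls` names the rule being
# stubbed out. It returns the goal's exit code plus whatever the goal wrote to
# stderr, which is what the tests below assert against.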
def run_test_rule(
    rule_runner: PythonRuleRunner,
    *,
    request_type: type[TestRequest],
    targets: list[Target],
    debug: bool = False,
    use_coverage: bool = False,
    experimental_report_test_result_info: bool = False,
    report: bool = False,
    report_dir: str = TestSubsystem.default_report_path,
    output: ShowOutput = ShowOutput.ALL,
    valid_targets: bool = True,
    show_rerun_command: bool = False,
    run_id: RunId = RunId(999),
) -> tuple[int, str]:
    test_subsystem = create_goal_subsystem(
        TestSubsystem,
        debug=debug,
        debug_adapter=False,
        use_coverage=use_coverage,
        experimental_report_test_result_info=experimental_report_test_result_info,
        report=report,
        report_dir=report_dir,
        xml_dir=None,
        output=output,
        extra_env_vars=[],
        shard="",
        batch_size=1,
        show_rerun_command=show_rerun_command,
        show_all_batch_targets=False,
    )
    debug_adapter_subsystem = create_subsystem(
        DebugAdapterSubsystem,
        host="127.0.0.1",
        port="5678",
    )
    workspace = Workspace(rule_runner.scheduler, _enforce_effects=False)
    union_membership = UnionMembership.from_rules(
        {
            UnionRule(TestFieldSet, MockTestFieldSet),
            UnionRule(TestRequest, request_type),
            UnionRule(TestRequest.PartitionRequest, request_type.PartitionRequest),
            UnionRule(TestRequest.Batch, request_type.Batch),
            UnionRule(CoverageDataCollection, MockCoverageDataCollection),
        }
    )

    def mock_find_valid_field_sets(
        _: TargetRootsToFieldSetsRequest,
    ) -> TargetRootsToFieldSets:
        if not valid_targets:
            return TargetRootsToFieldSets({})
        return TargetRootsToFieldSets(
            {
                tgt: [request_type.field_set_type.create(tgt)]
                for tgt in targets
                if request_type.field_set_type.is_applicable(tgt)
            }
        )

    def mock_debug_request(
        __implicitly: tuple,
    ) -> TestDebugRequest:
        return TestDebugRequest(InteractiveProcess(["/bin/example"], input_digest=EMPTY_DIGEST))

    def mock_coverage_report_generation(
        __implicitly: tuple,
    ) -> CoverageReports:
        coverage_data_collection, typ = next(iter(__implicitly[0].items()))
        assert typ == CoverageDataCollection
        addresses = ", ".join(
            address.spec
            for coverage_data in coverage_data_collection
            for address in coverage_data.addresses
        )
        console_report = ConsoleCoverageReport(
            coverage_insufficient=False, report=f"Ran coverage on {addresses}"
        )
        return CoverageReports(reports=(console_report,))

    with mock_console(rule_runner.options_bootstrapper) as (console, stdio_reader):
        result: Test = run_rule_with_mocks(
            run_tests,
            rule_args=[
                console,
                test_subsystem,
                debug_adapter_subsystem,
                workspace,
                union_membership,
                DistDir(relpath=Path("dist")),
                run_id,
                ChosenLocalEnvironmentName(EnvironmentName(None)),
            ],
            mock_calls={
                "pants.core.goals.test.partition_tests": mock_partitioner,
                "pants.core.environments.rules.resolve_single_environment_name": lambda _a: EnvironmentName(
                    None
                ),
                "pants.core.goals.test.test_batch_to_debug_request": mock_debug_request,
                "pants.core.goals.test.test_batch_to_debug_adapter_request": mock_debug_request,
                "pants.core.goals.test.run_test_batch": mock_test_partition,
                "pants.core.goals.test.create_coverage_report": mock_coverage_report_generation,
                "pants.engine.internals.specs_rules.find_valid_field_sets_for_target_roots": mock_find_valid_field_sets,
                "pants.engine.intrinsics.merge_digests": lambda _: EMPTY_DIGEST,
                "pants.engine.intrinsics._interactive_process": lambda _p,
                _e: InteractiveProcessResult(0),
            },
            union_membership=union_membership,
            # We don't want temporary warnings to interfere with our expected output.
            show_warnings=False,
        )
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()


def test_invalid_target_noops(rule_runner: PythonRuleRunner) -> None:
    exit_code, stderr = run_test_rule(
        rule_runner,
        request_type=SuccessfulRequest,
        targets=[make_target()],
        valid_targets=False,
    )
    assert exit_code == 0
    assert stderr.strip() == ""


def test_skipped_target_noops(rule_runner: PythonRuleRunner) -> None:
    exit_code, stderr = run_test_rule(
        rule_runner,
        request_type=ConditionallySucceedsRequest,
        targets=[make_target(Address("", target_name="bad"), skip=True)],
    )
    assert exit_code == 0
    assert stderr.strip() == ""


@pytest.mark.parametrize(
    ("show_rerun_command", "expected_stderr"),
    [
        (
            False,
            # the summary is for humans, so we test it literally, to make sure the formatting is good
            dedent(
                """\

                ✓ //:good succeeded in 1.00s (memoized).
                ✕ //:bad failed in 1.00s (memoized).
                """
            ),
        ),
        (
            True,
            dedent(
                """\

                ✓ //:good succeeded in 1.00s (memoized).
                ✕ //:bad failed in 1.00s (memoized).

                To rerun the failing tests, use:

                    pants test //:bad
                """
            ),
        ),
    ],
)
def test_summary(
    rule_runner: PythonRuleRunner, show_rerun_command: bool, expected_stderr: str
) -> None:
    good_address = Address("", target_name="good")
    bad_address = Address("", target_name="bad")
    skipped_address = Address("", target_name="skipped")

    exit_code, stderr = run_test_rule(
        rule_runner,
        request_type=ConditionallySucceedsRequest,
        targets=[make_target(good_address), make_target(bad_address), make_target(skipped_address)],
        show_rerun_command=show_rerun_command,
    )
    assert exit_code == ConditionallySucceedsRequest.exit_code((bad_address,))
    assert stderr == expected_stderr


def _assert_test_summary(
    expected: str,
    *,
    exit_code: int | None,
    run_id: int,
    result_metadata: ProcessResultMetadata | None,
) -> None:
    assert expected == _format_test_summary(
        make_test_result(
            [Address(spec_path="", target_name="dummy_address")],
            exit_code=exit_code,
            result_metadata=result_metadata,
            output_setting=ShowOutput.FAILED,
        ),
        RunId(run_id),
        Console(use_colors=False),
    )


def test_format_summary_remote(rule_runner: PythonRuleRunner) -> None:
    _assert_test_summary(
        "✓ //:dummy_address succeeded in 0.05s (ran in remote environment `ubuntu`).",
        exit_code=0,
        run_id=0,
        result_metadata=make_process_result_metadata(
            "ran", environment_name="ubuntu", remote_execution=True, total_elapsed_ms=50
        ),
    )


def test_format_summary_local(rule_runner: PythonRuleRunner) -> None:
    _assert_test_summary(
        "✓ //:dummy_address succeeded in 0.05s.",
        exit_code=0,
        run_id=0,
        result_metadata=make_process_result_metadata(
            "ran", environment_name=None, total_elapsed_ms=50
        ),
    )


def test_format_summary_memoized(rule_runner: PythonRuleRunner) -> None:
    _assert_test_summary(
        "✓ //:dummy_address succeeded in 0.05s (memoized).",
        exit_code=0,
        run_id=1234,
        result_metadata=make_process_result_metadata("ran", total_elapsed_ms=50),
    )


def test_format_summary_memoized_remote(rule_runner: PythonRuleRunner) -> None:
    _assert_test_summary(
        "✓ //:dummy_address succeeded in 0.05s (memoized for remote environment `ubuntu`).",
        exit_code=0,
        run_id=1234,
        result_metadata=make_process_result_metadata(
            "ran", environment_name="ubuntu", remote_execution=True, total_elapsed_ms=50
        ),
    )


@pytest.mark.parametrize(
    ("results", "expected"),
    [
        pytest.param([], None, id="no_results"),
        pytest.param(
            [make_test_result([Address("", target_name="t1")], exit_code=0)], None, id="one_success"
        ),
        pytest.param(
            [make_test_result([Address("", target_name="t2")], exit_code=None)],
            None,
            id="one_no_run",
        ),
        pytest.param(
            [make_test_result([Address("", target_name="t3")], exit_code=1)],
            "To rerun the failing tests, use:\n\n    pants test //:t3",
            id="one_failure",
        ),
        pytest.param(
            [
                make_test_result([Address("", target_name="t1")], exit_code=0),
                make_test_result([Address("", target_name="t2")], exit_code=None),
                make_test_result([Address("", target_name="t3")], exit_code=1),
            ],
            "To rerun the failing tests, use:\n\n    pants test //:t3",
            id="one_of_each",
        ),
        pytest.param(
            [
                make_test_result([Address("path/to", target_name="t1")], exit_code=1),
                make_test_result([Address("another/path", target_name="t2")], exit_code=2),
                make_test_result([Address("", target_name="t3")], exit_code=3),
            ],
            "To rerun the failing tests, use:\n\n    pants test //:t3 another/path:t2 path/to:t1",
            id="multiple_failures",
        ),
        pytest.param(
            [
                make_test_result(
                    [
                        Address(
                            "path with spaces",
                            target_name="$*",
                            parameters=dict(key="value"),
                            generated_name="gn",
                        )
                    ],
                    exit_code=1,
                )
            ],
            "To rerun the failing tests, use:\n\n    pants test 'path with spaces:$*#gn@key=value'",
            id="special_characters_require_quoting",
        ),
    ],
)
def test_format_rerun_command(results: list[TestResult], expected: None | str) -> None:
    assert expected == _format_test_rerun_command(results)


def test_debug_target(rule_runner: PythonRuleRunner, monkeypatch: MonkeyPatch) -> None:
    def noop():
        pass

    monkeypatch.setattr("pants.engine.intrinsics.task_side_effected", noop)
    exit_code, _ = run_test_rule(
        rule_runner,
        request_type=SuccessfulRequest,
        targets=[make_target()],
        debug=True,
    )
    assert exit_code == 0


def test_report(rule_runner: PythonRuleRunner) -> None:
    addr1 = Address("", target_name="t1")
    addr2 = Address("", target_name="t2")
    exit_code, stderr = run_test_rule(
        rule_runner,
        request_type=SuccessfulRequest,
        targets=[make_target(addr1), make_target(addr2)],
        report=True,
    )
    assert exit_code == 0
    assert "Wrote test reports to dist/test/reports" in stderr


def test_report_dir(rule_runner: PythonRuleRunner) -> None:
    report_dir = "dist/test-results"
    addr1 = Address("", target_name="t1")
    addr2 = Address("", target_name="t2")
    exit_code, stderr = run_test_rule(
        rule_runner,
        request_type=SuccessfulRequest,
        targets=[make_target(addr1), make_target(addr2)],
        report=True,
        report_dir=report_dir,
    )
    assert exit_code == 0
    assert f"Wrote test reports to {report_dir}" in stderr


def test_coverage(rule_runner: PythonRuleRunner) -> None:
    addr1 = Address("", target_name="t1")
    addr2 = Address("", target_name="t2")
    exit_code, stderr = run_test_rule(
        rule_runner,
        request_type=SuccessfulRequest,
        targets=[make_target(addr1), make_target(addr2)],
        use_coverage=True,
    )
    assert exit_code == 0
    assert stderr.strip().endswith(f"Ran coverage on {addr1.spec}, {addr2.spec}")


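# The coverage report flags the whole of this helper as uncovered. Plausibly
# that is because its name lacks the `test_` prefix, so pytest's default
# collection rules never pick it up and the body does not run.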
def sort_results() -> None:
    def create_test_result(exit_code: int | None, addresses: Iterable[Address]) -> TestResult:
        return TestResult(
            exit_code=exit_code,
            addresses=tuple(addresses),
            stdout_bytes=b"",
            stdout_digest=EMPTY_FILE_DIGEST,
            stderr_bytes=b"",
            stderr_digest=EMPTY_FILE_DIGEST,
            output_setting=ShowOutput.ALL,
            result_metadata=None,
        )

    skip1 = create_test_result(
        exit_code=None,
        addresses=(Address("t1"),),
    )
    skip2 = create_test_result(
        exit_code=None,
        addresses=(Address("t2"),),
    )
    success1 = create_test_result(
        exit_code=0,
        addresses=(Address("t1"),),
    )
    success2 = create_test_result(
        exit_code=0,
        addresses=(Address("t2"),),
    )
    fail1 = create_test_result(
        exit_code=1,
        addresses=(Address("t1"),),
    )
    fail2 = create_test_result(
        exit_code=1,
        addresses=(Address("t2"),),
    )
    assert sorted([fail2, success2, skip2, fail1, success1, skip1]) == [
        skip1,
        skip2,
        success1,
        success2,
        fail1,
        fail2,
    ]


def assert_streaming_output(
    *,
    exit_code: int | None,
    stdout: str = "stdout",
    stderr: str = "stderr",
    output_setting: ShowOutput = ShowOutput.ALL,
    expected_level: LogLevel,
    expected_message: str,
    result_metadata: ProcessResultMetadata = make_process_result_metadata("dummy"),
) -> None:
    result = make_test_result(
        addresses=(Address("demo_test"),),
        exit_code=exit_code,
        stdout_bytes=stdout.encode(),
        stderr_bytes=stderr.encode(),
        output_setting=output_setting,
        result_metadata=result_metadata,
    )
    assert result.level() == expected_level
    assert result.message() == expected_message


def test_streaming_output_no_tests() -> None:
    assert_streaming_output(
        exit_code=None,
        stdout="",
        stderr="",
        expected_level=LogLevel.DEBUG,
        expected_message="no tests found.",
    )


def test_streaming_output_success() -> None:
    assert_success_streamed = partial(
        assert_streaming_output, exit_code=0, expected_level=LogLevel.INFO
    )
    assert_success_streamed(
        expected_message=dedent(
            """\
            succeeded.
            stdout
            stderr

            """
        ),
    )
    assert_success_streamed(output_setting=ShowOutput.FAILED, expected_message="succeeded.")
    assert_success_streamed(output_setting=ShowOutput.NONE, expected_message="succeeded.")


def test_streaming_output_failure() -> None:
    assert_failure_streamed = partial(
        assert_streaming_output, exit_code=1, expected_level=LogLevel.ERROR
    )
    message = dedent(
        """\
        failed (exit code 1).
        stdout
        stderr

        """
    )
    assert_failure_streamed(expected_message=message)
    assert_failure_streamed(output_setting=ShowOutput.FAILED, expected_message=message)
    assert_failure_streamed(
        output_setting=ShowOutput.NONE, expected_message="failed (exit code 1)."
    )


def test_runtime_package_dependencies() -> None:
    rule_runner = PythonRuleRunner(
        rules=[
            build_runtime_package_dependencies,
            *pex_from_targets.rules(),
            *package_pex_binary.rules(),
            *python_target_type_rules(),
            QueryRule(BuiltPackageDependencies, [BuildPackageDependenciesRequest]),
        ],
        target_types=[PythonSourcesGeneratorTarget, PexBinary],
    )
    rule_runner.set_options(args=[], env_inherit={"PATH", "PYENV_ROOT", "HOME"})

    rule_runner.write_files(
        {
            "src/py/main.py": "",
            "src/py/BUILD": dedent(
                """\
                python_sources()
                pex_binary(name='main', entry_point='main.py')
                """
            ),
        }
    )
    # Include an irrelevant target that cannot be built with `./pants package`.
    input_field = RuntimePackageDependenciesField(["src/py", "src/py:main"], Address("fake"))
    result = rule_runner.request(
        BuiltPackageDependencies, [BuildPackageDependenciesRequest(input_field)]
    )
    assert len(result) == 1
    built_package = result[0]
    snapshot = rule_runner.request(Snapshot, [built_package.digest])
    assert snapshot.files == ("src.py/main.pex",)


def test_timeout_calculation() -> None:
    def assert_timeout_calculated(
        *,
        field_value: int | None,
        expected: int | None,
        global_default: int | None = None,
        global_max: int | None = None,
        timeouts_enabled: bool = True,
    ) -> None:
        field = MockTestTimeoutField(field_value, Address("", target_name="tests"))
        test_subsystem = create_subsystem(
            TestSubsystem,
            timeouts=timeouts_enabled,
            timeout_default=global_default,
            timeout_maximum=global_max,
        )
        assert field.calculate_from_global_options(test_subsystem) == expected

    assert_timeout_calculated(field_value=10, expected=10)
    assert_timeout_calculated(field_value=20, global_max=10, expected=10)
    assert_timeout_calculated(field_value=None, global_default=20, expected=20)
    assert_timeout_calculated(field_value=None, expected=None)
    assert_timeout_calculated(field_value=None, global_default=20, global_max=10, expected=10)
    assert_timeout_calculated(field_value=10, timeouts_enabled=False, expected=None)


def test_non_utf8_output() -> None:
    test_result = make_test_result(
        [],
        exit_code=1,  # "test error" so stdout/stderr are output in message
        stdout_bytes=b"\x80\xbf",  # invalid UTF-8 as required by the test
        stderr_bytes=b"\x80\xbf",  # invalid UTF-8 as required by the test
        output_setting=ShowOutput.ALL,
    )
    assert test_result.message() == "failed (exit code 1).\n��\n��\n\n"
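# Each of the two invalid bytes surfaces in the message as the Unicode
# replacement character (U+FFFD), so undecodable tool output is rendered in
# the summary rather than raising a decode error.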