pantsbuild / pants · build 20332790708

18 Dec 2025 09:48AM UTC · coverage: 64.992% (-15.3 points from 80.295%)

Pull Request #22949: Add experimental uv resolver for Python lockfiles
Merge f730a56cd into 407284c67 (github · web-flow)

54 of 97 new or added lines in 5 files covered (55.67%).

8270 existing lines in 295 files are now uncovered.

48990 of 75379 relevant lines covered (64.99%)

1.81 hits per line

Source file: /src/python/pants/backend/shell/goals/test_test.py (58.47% covered)
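
As a quick sanity check, the headline percentages follow directly from the quoted
line counts (a minimal sketch; the numbers are taken verbatim from the report above):

    covered, relevant = 48990, 75379
    print(f"{100 * covered / relevant:.3f}%")  # -> 64.992%, the overall coverage
    print(f"{100 * 54 / 97:.2f}%")             # -> 55.67% of new/added lines covered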

# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

from pathlib import Path
from textwrap import dedent

import pytest

from pants.backend.adhoc import run_system_binary
from pants.backend.adhoc.target_types import SystemBinaryTarget
from pants.backend.shell.goals import test
from pants.backend.shell.goals.test import ShellTestRequest, TestShellCommandFieldSet
from pants.backend.shell.target_types import (
    ShellCommandTarget,
    ShellCommandTestTarget,
    ShellSourcesGeneratorTarget,
)
from pants.build_graph.address import Address
from pants.core.goals import package
from pants.core.goals.test import TestDebugRequest, TestResult, get_filtered_environment
from pants.core.util_rules import archive, source_files, system_binaries
from pants.engine.fs import EMPTY_DIGEST, Digest, DigestContents, FileContent
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import QueryRule
from pants.engine.target import Target
from pants.testutil.rule_runner import RuleRunner, mock_console
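
# Number of attempts allowed for a failing test, passed via `--test-attempts-default`
# in the fixture below; the basic-usage test asserts one process result per attempt.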
ATTEMPTS_DEFAULT_OPTION = 2


@pytest.fixture
def rule_runner() -> RuleRunner:
    rule_runner = RuleRunner(
        rules=[
            *test.rules(),
            *source_files.rules(),
            *archive.rules(),
            *package.rules(),
            *system_binaries.rules(),
            *run_system_binary.rules(),
            get_filtered_environment,
            QueryRule(TestResult, (ShellTestRequest.Batch,)),
            QueryRule(TestDebugRequest, [ShellTestRequest.Batch]),
        ],
        target_types=[
            ShellSourcesGeneratorTarget,
            ShellCommandTarget,
            ShellCommandTestTarget,
            SystemBinaryTarget,
        ],
    )
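    # Inheriting PATH lets the sandboxed shell processes discover the system
    # binaries (echo, mkdir, cat, ...) that these tests request via `tools`.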
    rule_runner.set_options(
        [f"--test-attempts-default={ATTEMPTS_DEFAULT_OPTION}"], env_inherit={"PATH"}
    )
    return rule_runner


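# End-to-end check of `test_shell_command`: a passing target, a failing target (which
# should be retried once per configured attempt, hence ATTEMPTS_DEFAULT_OPTION process
# results), interactive execution via the `test` goal's `--debug` flag, and a target
# using `runnable_dependencies` so the command can invoke the `cat` and `test` binaries.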
@pytest.mark.platform_specific_behavior
def test_basic_usage_of_test_shell_command(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                shell_sources(name="src")

                shell_command(
                  name="msg-gen",
                  command="echo message > msg.txt",
                  tools=["echo"],
                  output_files=["msg.txt"],
                )

                test_shell_command(
                  name="pass",
                  execution_dependencies=[":msg-gen", ":src"],
                  tools=["echo"],
                  command="./test.sh msg.txt message",
                )

                test_shell_command(
                  name="fail",
                  execution_dependencies=[":msg-gen", ":src"],
                  tools=["echo"],
                  command="./test.sh msg.txt xyzzy",
                )

                # Check whether `runnable_dependencies` works.
                system_binary(
                    name="cat",
                    binary_name="cat",
                )
                system_binary(
                    name="test",
                    binary_name="test",
                    fingerprint_args=["1", "=", "1"]
                )
                test_shell_command(
                  name="pass_with_runnable_dependency",
                  execution_dependencies=[":msg-gen", ":src"],
                  tools=["echo"],
                  runnable_dependencies=[":cat", ":test"],
                  command="value=$(cat msg.txt) && test $value = message",
                )
                """
            ),
            "test.sh": dedent(
                """\
                contents="$(<$1)"
                if [ "$contents" = "$2" ]; then
                  echo "contains '$2'"
                  exit 0
                else
                  echo "does not contain '$2'"
                  exit 1
                fi
                """
            ),
        }
    )
    (Path(rule_runner.build_root) / "test.sh").chmod(0o555)

    def test_batch_for_target(test_target: Target) -> ShellTestRequest.Batch:
        return ShellTestRequest.Batch("", (TestShellCommandFieldSet.create(test_target),), None)

    def run_test(test_target: Target) -> TestResult:
        return rule_runner.request(TestResult, [test_batch_for_target(test_target)])

    pass_target = rule_runner.get_target(Address("", target_name="pass"))
    pass_result = run_test(pass_target)
    assert pass_result.exit_code == 0
    assert pass_result.stdout_bytes == b"contains 'message'\n"

    fail_target = rule_runner.get_target(Address("", target_name="fail"))
    fail_result = run_test(fail_target)
    assert fail_result.exit_code == 1
    assert fail_result.stdout_bytes == b"does not contain 'xyzzy'\n"
    assert len(fail_result.process_results) == ATTEMPTS_DEFAULT_OPTION

    # Check whether interactive execution via the `test` goal's `--debug` flag succeeds.
    pass_debug_request = rule_runner.request(TestDebugRequest, [test_batch_for_target(pass_target)])
    with mock_console(rule_runner.options_bootstrapper):
        pass_debug_result = rule_runner.run_interactive_process(pass_debug_request.process)
        assert pass_debug_result.exit_code == 0

    fail_debug_request = rule_runner.request(TestDebugRequest, [test_batch_for_target(fail_target)])
    with mock_console(rule_runner.options_bootstrapper):
        fail_debug_result = rule_runner.run_interactive_process(fail_debug_request.process)
        assert fail_debug_result.exit_code == 1

    pass_for_runnable_dependency_target = rule_runner.get_target(
        Address("", target_name="pass_with_runnable_dependency")
    )
    pass_for_runnable_dependency_result = run_test(pass_for_runnable_dependency_target)
    assert pass_for_runnable_dependency_result.exit_code == 0


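# Verifies that paths named in `output_files` / `output_directories` are captured from
# the sandbox after the test runs and surfaced as `TestResult.extra_output`.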
@pytest.mark.platform_specific_behavior
def test_extra_outputs_support(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                shell_sources(name="src")

                test_shell_command(
                  name="test",
                  execution_dependencies=[":src"],
                  tools=["echo", "mkdir"],
                  command="./test.sh msg.txt message",
                  output_files=["world.txt"],
                  output_directories=["some-dir"],
                )
                """
            ),
            "test.sh": dedent(
                """\
                mkdir -p some-dir
                echo "xyzzy" > some-dir/foo.txt
                echo "hello" > world.txt
                """
            ),
        }
    )
    (Path(rule_runner.build_root) / "test.sh").chmod(0o555)

    def test_batch_for_target(test_target: Target) -> ShellTestRequest.Batch:
        return ShellTestRequest.Batch("", (TestShellCommandFieldSet.create(test_target),), None)

    def run_test(test_target: Target) -> TestResult:
        return rule_runner.request(TestResult, [test_batch_for_target(test_target)])

    result = run_test(rule_runner.get_target(Address("", target_name="test")))
    assert result.extra_output is not None
    digest_contents = rule_runner.request(DigestContents, [result.extra_output.digest])
    digest_contents_sorted = sorted(digest_contents, key=lambda x: x.path)
    assert len(digest_contents_sorted) == 2
    assert digest_contents_sorted[0] == FileContent("some-dir/foo.txt", b"xyzzy\n")
    assert digest_contents_sorted[1] == FileContent("world.txt", b"hello\n")


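# Per the coverage report, the body of the next test was not executed in this run
# (marked UNCOV). It exercises `outputs_match_mode`: "allow_empty" tolerates missing
# outputs, "all" raises if any declared output is absent, and "at_least_one" passes
# when at least one declared output was produced.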
def test_outputs_match_mode_support(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
            test_shell_command(
                name="allow_empty",
                command="true",
                output_files=["non-existent-file"],
                output_directories=["non-existent-dir"],
                outputs_match_mode="allow_empty",
            )
            test_shell_command(
                name="all_with_present_file",
                command="touch some-file",
                tools=["touch"],
                output_files=["some-file"],
                output_directories=["some-directory"],
                outputs_match_mode="all",
            )
            test_shell_command(
                name="all_with_present_directory",
                command="mkdir some-directory",
                tools=["mkdir"],
                output_files=["some-file"],
                output_directories=["some-directory"],
                outputs_match_mode="all",
            )
            test_shell_command(
                name="at_least_one_with_present_file",
                command="touch some-file",
                tools=["touch"],
                output_files=["some-file"],
                output_directories=["some-directory"],
                outputs_match_mode="at_least_one",
            )
            test_shell_command(
                name="at_least_one_with_present_directory",
                command="mkdir some-directory && touch some-directory/foo.txt",
                tools=["mkdir", "touch"],
                output_files=["some-file"],
                output_directories=["some-directory"],
                outputs_match_mode="at_least_one",
            )
            """
            )
        }
    )

    def test_batch_for_target(test_target: Target) -> ShellTestRequest.Batch:
        return ShellTestRequest.Batch("", (TestShellCommandFieldSet.create(test_target),), None)

    def run_test(address: Address) -> TestResult:
        test_target = rule_runner.get_target(address)
        return rule_runner.request(TestResult, [test_batch_for_target(test_target)])

    def assert_result(
        address: Address,
        expected_contents: dict[str, str],
    ) -> None:
        result = run_test(address)
        if expected_contents:
            assert result.extra_output
            assert result.extra_output.files == tuple(expected_contents)

            contents = rule_runner.request(DigestContents, [result.extra_output.digest])
            for fc in contents:
                assert fc.content == expected_contents[fc.path].encode()

    assert_result(Address("", target_name="allow_empty"), {})

    with pytest.raises(ExecutionError) as exc_info:
        run_test(Address("", target_name="all_with_present_file"))
    assert "some-directory" in str(exc_info)

    with pytest.raises(ExecutionError) as exc_info:
        run_test(Address("", target_name="all_with_present_directory"))
    assert "some-file" in str(exc_info)

    assert_result(Address("", target_name="at_least_one_with_present_file"), {"some-file": ""})
    assert_result(
        Address("", target_name="at_least_one_with_present_directory"),
        {"some-directory/foo.txt": ""},
    )


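# Also uncovered in this run, per the report. Contrasts `cache_scope="session"`
# (results are reused within a session but re-executed in a new one) with
# `cache_scope="success"` (passing results stay cached across sessions).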
def test_cache_scope_support(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            "src/BUILD": dedent(
                """\
            test_shell_command(
              name="cmd_session_scope",
              # Use a random value so we can detect when re-execution occurs.
              command="echo $RANDOM > out.log",
              output_files=["out.log"],
              cache_scope="session",
            )
            test_shell_command(
              name="cmd_success_scope",
              # Use a random value so we can detect when re-execution occurs.
              command="echo $RANDOM > out.log",
              output_files=["out.log"],
              cache_scope="success",
            )
            """
            ),
            "src/a-file": "",
        }
    )

    def test_batch_for_target(test_target: Target) -> ShellTestRequest.Batch:
        return ShellTestRequest.Batch("", (TestShellCommandFieldSet.create(test_target),), None)

    def run_test(address: Address) -> TestResult:
        test_target = rule_runner.get_target(address)
        return rule_runner.request(TestResult, [test_batch_for_target(test_target)])

    def test_output_equal(result1: TestResult, result2: TestResult) -> bool:
        digest1: Digest = EMPTY_DIGEST
        if result1.extra_output:
            digest1 = result1.extra_output.digest

        digest2: Digest = EMPTY_DIGEST
        if result2.extra_output:
            digest2 = result2.extra_output.digest

        return digest1 == digest2

    # Re-running a session-scoped test within the same session should hit the cache.
    address_session = Address("src", target_name="cmd_session_scope")
    session_result_1 = run_test(address_session)
    session_result_2 = run_test(address_session)
    assert test_output_equal(session_result_1, session_result_2)

    # Run the success-scoped test so its result is cached for checking in the new session.
    address_success = Address("src", target_name="cmd_success_scope")
    success_result_1 = run_test(address_success)

    # Create a new session.
    rule_runner.new_session("second-session")
    rule_runner.set_options([])

    # In a new session, the session-scoped test should be re-executed.
    session_result_3 = run_test(address_session)
    assert not test_output_equal(session_result_2, session_result_3)

    # In a new session, the success-scoped test should NOT be re-executed.
    success_result_2 = run_test(address_success)
    assert test_output_equal(success_result_1, success_result_2)
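
To run just this file's tests locally (a sketch, assuming a pantsbuild/pants checkout
with its bundled `pants` launcher; exact invocation may vary by setup):

    pants test src/python/pants/backend/shell/goals/test_test.py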