pantsbuild / pants / build 18252174847

05 Oct 2025 01:36AM UTC. Coverage: 43.382% (-36.9% from 80.261%)

Commit (push via github, web-flow): run tests on mac arm (#22717)

    Just doing the minimal to pull forward the x86_64 pattern.

    ref #20993

25776 of 59416 relevant lines covered (43.38%), 1.3 hits per line

Source file: /src/python/pants/backend/python/goals/pytest_runner_integration_test.py (46.69% covered)
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

import os
import re
import unittest.mock
from collections.abc import Iterable
from textwrap import dedent

import pytest

from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference import rules as dependency_inference_rules
from pants.backend.python.goals import package_dists, package_pex_binary, pytest_runner
from pants.backend.python.goals.coverage_py import create_or_update_coverage_config
from pants.backend.python.goals.pytest_runner import (
    PytestPluginSetup,
    PytestPluginSetupRequest,
    PyTestRequest,
    TestMetadata,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.subsystems.pytest import PythonTestFieldSet
from pants.backend.python.target_types import (
    PexBinary,
    PythonDistribution,
    PythonRequirementTarget,
    PythonSourcesGeneratorTarget,
    PythonTestsGeneratorTarget,
    PythonTestUtilsGeneratorTarget,
)
from pants.backend.python.util_rules import local_dists, pex_from_targets
from pants.core.goals import package
from pants.core.goals.test import (
    TestDebugAdapterRequest,
    TestDebugRequest,
    TestResult,
    build_runtime_package_dependencies,
    get_filtered_environment,
)
from pants.core.util_rules import config_files, distdir
from pants.core.util_rules.partitions import Partitions
from pants.engine.addresses import Address
from pants.engine.fs import CreateDigest, DigestContents, FileContent
from pants.engine.intrinsics import create_digest
from pants.engine.process import InteractiveProcessResult
from pants.engine.rules import rule
from pants.engine.target import Target
from pants.engine.unions import UnionRule
from pants.testutil.debug_adapter_util import debugadapter_port_for_testing
from pants.testutil.python_interpreter_selection import (
    all_major_minor_python_versions,
    skip_unless_python310_and_python311_present,
)
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule, mock_console
from pants.util.resources import read_sibling_resource


@pytest.fixture
def rule_runner() -> PythonRuleRunner:
    return PythonRuleRunner(
        rules=[
            build_runtime_package_dependencies,
            create_or_update_coverage_config,
            *pytest_runner.rules(),
            *pex_from_targets.rules(),
            *dependency_inference_rules.rules(),
            *distdir.rules(),
            *config_files.rules(),
            *package_pex_binary.rules(),
            get_filtered_environment,
            *target_types_rules.rules(),
            *local_dists.rules(),
            *package_dists.rules(),
            *package.rules(),
            QueryRule(Partitions, (PyTestRequest.PartitionRequest,)),
            QueryRule(TestResult, (PyTestRequest.Batch,)),
            QueryRule(TestDebugRequest, (PyTestRequest.Batch,)),
            QueryRule(TestDebugAdapterRequest, (PyTestRequest.Batch,)),
        ],
        target_types=[
            PexBinary,
            PythonSourcesGeneratorTarget,
            PythonTestsGeneratorTarget,
            PythonTestUtilsGeneratorTarget,
            PythonRequirementTarget,
            PythonDistribution,
        ],
        objects={"python_artifact": PythonArtifact},
    )


SOURCE_ROOT = "tests/python"
PACKAGE = os.path.join(SOURCE_ROOT, "pants_test")

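# A minimal passing test body, reused by many of the cases below.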
GOOD_TEST = dedent(
    """\
    def test():
        pass
    """
)


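# Set the baseline options that every test here needs (the Python backend, the
# test source root, and a deterministic debug-adapter port); `extra_args` and
# `env` let individual tests layer on more configuration.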
def _configure_pytest_runner(
    rule_runner: PythonRuleRunner,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> None:
    args = [
        "--backend-packages=pants.backend.python",
        f"--source-root-patterns={SOURCE_ROOT}",
        f"--debug-adapter-port={debugadapter_port_for_testing()}",
        *(extra_args or ()),
    ]
    rule_runner.set_options(args, env=env, env_inherit={"PATH", "PYENV_ROOT", "HOME"})


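# Partition the field sets and return the single expected partition as a batch.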
def _get_pytest_batch(
    rule_runner: PythonRuleRunner, test_targets: Iterable[Target]
) -> PyTestRequest.Batch[PythonTestFieldSet, TestMetadata]:
    field_sets = tuple(PythonTestFieldSet.create(tgt) for tgt in test_targets)
    partitions = rule_runner.request(Partitions, [PyTestRequest.PartitionRequest(field_sets)])
    assert len(partitions) == 1
    return PyTestRequest.Batch("", partitions[0].elements, partitions[0].metadata)


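# Run a batch on the normal code path, the interactive `--debug` path, and
# (unless disabled) the debug-adapter path, asserting that all three agree on
# the exit code.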
def run_pytest(
    rule_runner: PythonRuleRunner,
    test_targets: Iterable[Target],
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
    test_debug_adapter: bool = True,
) -> TestResult:
    _configure_pytest_runner(rule_runner, extra_args=extra_args, env=env)
    batch = _get_pytest_batch(rule_runner, test_targets)
    test_result = rule_runner.request(TestResult, [batch])
    debug_request = rule_runner.request(TestDebugRequest, [batch])
    if debug_request.process is not None:
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = rule_runner.run_interactive_process(debug_request.process)
            assert test_result.exit_code == debug_result.exit_code

    if test_debug_adapter:
        debug_adapter_request = rule_runner.request(TestDebugAdapterRequest, [batch])
        if debug_adapter_request.process is not None:
            with mock_console(rule_runner.options_bootstrapper) as mocked_console:
                _, stdioreader = mocked_console
                debug_adapter_result = rule_runner.run_interactive_process(
                    debug_adapter_request.process
                )
                assert test_result.exit_code == debug_adapter_result.exit_code, (
                    f"{stdioreader.get_stdout()}\n{stdioreader.get_stderr()}"
                )

    return test_result


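# Variants of `run_pytest` for tests that need exactly one execution mode.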
def run_pytest_noninteractive(
    rule_runner: PythonRuleRunner,
    test_target: Target,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> TestResult:
    _configure_pytest_runner(rule_runner, extra_args=extra_args, env=env)
    return rule_runner.request(TestResult, [_get_pytest_batch(rule_runner, [test_target])])


def run_pytest_interactive(
    rule_runner: PythonRuleRunner,
    test_target: Target,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> InteractiveProcessResult:
    _configure_pytest_runner(rule_runner, extra_args=extra_args, env=env)
    debug_request = rule_runner.request(
        TestDebugRequest, [_get_pytest_batch(rule_runner, [test_target])]
    )
    with mock_console(rule_runner.options_bootstrapper):
        return rule_runner.run_interactive_process(debug_request.process)


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(["CPython>=3.9,<4"]),
)
def test_passing(rule_runner: PythonRuleRunner, major_minor_interpreter: str) -> None:
    rule_runner.write_files(
        {f"{PACKAGE}/tests.py": GOOD_TEST, f"{PACKAGE}/BUILD": "python_tests()"}
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(
        rule_runner,
        [tgt],
        extra_args=[f"--python-interpreter-constraints=['=={major_minor_interpreter}.*']"],
    )
    assert result.xml_results is not None
    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py ." in result.stdout_simplified_str


def test_failing(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/tests.py": dedent(
                """\
                def test():
                    assert False
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests()",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(rule_runner, [tgt])
    assert result.exit_code == 1
    assert f"{PACKAGE}/tests.py F" in result.stdout_simplified_str


def test_dependencies(rule_runner: PythonRuleRunner) -> None:
    """Ensure direct and transitive dependencies work."""
    rule_runner.write_files(
        {
            f"{PACKAGE}/__init__.py": "",
            f"{PACKAGE}/lib1.py": dedent(
                """\
                def add_one(x):
                    return x + 1
                """
            ),
            f"{PACKAGE}/lib2.py": dedent(
                """\
                from colors import red

                def add_two(x):
                    return x + 2
                """
            ),
            f"{PACKAGE}/lib3.py": dedent(
                """\
                from pants_test.lib2 import add_two

                def add_three(x):
                    return add_two(x) + 1
                """
            ),
            f"{PACKAGE}/tests.py": dedent(
                """\
                from pants_test.lib1 import add_one
                from .lib2 import add_two
                from pants_test.lib3 import add_three
                from ordered_set import OrderedSet

                def test():
                    assert add_one(1) == 2
                    assert add_two(1) == 3
                    assert add_three(1) == 4
                """
            ),
            f"{PACKAGE}/BUILD": dedent(
                """\
                python_tests()
                python_sources(name="lib")
                python_requirement(
                    name="reqs", requirements=["ansicolors==1.1.8", "ordered-set==3.1.1"]
                )
                """
            ),
        }
    )

    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(rule_runner, [tgt])
    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py ." in result.stdout_simplified_str


@skip_unless_python310_and_python311_present
def test_uses_correct_python_version(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            # ExceptionGroup was introduced in 3.11.
            f"{PACKAGE}/tests.py": dedent(
                """\
                def test() -> None:
                    eg = ExceptionGroup('', [Exception()])
                """
            ),
            f"{PACKAGE}/BUILD": dedent(
                """\
                python_tests(name='py310', interpreter_constraints=['==3.10.*'])
                python_tests(name='py311', interpreter_constraints=['==3.11.*'])
                """
            ),
        }
    )

    py310_tgt = rule_runner.get_target(
        Address(PACKAGE, target_name="py310", relative_file_path="tests.py")
    )
    result = run_pytest(rule_runner, [py310_tgt], test_debug_adapter=False)
    assert result.exit_code == 1
    assert b"NameError: name 'ExceptionGroup' is not defined" in result.stdout_bytes

    py311_tgt = rule_runner.get_target(
        Address(PACKAGE, target_name="py311", relative_file_path="tests.py")
    )
    result = run_pytest(rule_runner, [py311_tgt], test_debug_adapter=False)
    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py ." in result.stdout_simplified_str


def test_passthrough_args(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/tests.py": dedent(
                """\
                def test_run_me():
                  pass

                def test_ignore_me():
                  pass
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests()",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(rule_runner, [tgt], extra_args=["--pytest-args='-k test_run_me'"])
    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py ." in result.stdout_simplified_str
    assert b"collected 2 items / 1 deselected / 1 selected" in result.stdout_bytes


def test_xdist_enabled_noninteractive(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/tests.py": dedent(
                """\
                import os

                def test_worker_id_set():
                  assert "PYTEST_XDIST_WORKER" in os.environ

                def test_worker_count_set():
                  assert "PYTEST_XDIST_WORKER_COUNT" in os.environ
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests(xdist_concurrency=2)",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest_noninteractive(rule_runner, tgt, extra_args=["--pytest-xdist-enabled"])
    assert result.exit_code == 0


def test_xdist_enabled_but_disabled_for_target(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/tests.py": dedent(
                """\
                import os

                def test_worker_id_not_set():
                  assert "PYTEST_XDIST_WORKER" not in os.environ

                def test_worker_count_not_set():
                  assert "PYTEST_XDIST_WORKER_COUNT" not in os.environ
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests(xdist_concurrency=0)",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest_noninteractive(rule_runner, tgt, extra_args=["--pytest-xdist-enabled"])
    assert result.exit_code == 0


def test_xdist_enabled_interactive(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/tests.py": dedent(
                """\
                import os

                def test_worker_id_not_set():
                  assert "PYTEST_XDIST_WORKER" not in os.environ

                def test_worker_count_not_set():
                  assert "PYTEST_XDIST_WORKER_COUNT" not in os.environ
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests(xdist_concurrency=2)",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest_interactive(rule_runner, tgt, extra_args=["--pytest-xdist-enabled"])
    assert result.exit_code == 0


@pytest.mark.parametrize(
    "config_path,extra_args",
    (["pytest.ini", []], ["custom_config.ini", ["--pytest-config=custom_config.ini"]]),
)
def test_config_file(
    rule_runner: PythonRuleRunner, config_path: str, extra_args: list[str]
) -> None:
    rule_runner.write_files(
        {
            config_path: dedent(
                """\
                [pytest]
                addopts = -s
                """
            ),
            f"{PACKAGE}/tests.py": dedent(
                """\
                def test():
                    print("All good!")
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests()",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(rule_runner, [tgt], extra_args=extra_args)
    assert result.exit_code == 0
    assert b"All good!" in result.stdout_bytes and b"Captured" not in result.stdout_bytes


def test_force(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {f"{PACKAGE}/tests.py": GOOD_TEST, f"{PACKAGE}/BUILD": "python_tests()"}
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))

    # Should not receive a memoized result if force=True.
    result_one = run_pytest(rule_runner, [tgt], extra_args=["--test-force"])
    result_two = run_pytest(rule_runner, [tgt], extra_args=["--test-force"])
    assert result_one.exit_code == 0
    assert result_two.exit_code == 0
    assert result_one is not result_two

    # But should if force=False.
    result_one = run_pytest(rule_runner, [tgt])
    result_two = run_pytest(rule_runner, [tgt])
    assert result_one.exit_code == 0
    assert result_one is result_two


def test_extra_output(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/tests.py": GOOD_TEST,
            f"{PACKAGE}/BUILD": "python_tests()",
            # The test lockfile provides pytest-html and also setuptools, which it requires
            # because it does not use PEP 517.
            "pytest.lock": read_sibling_resource(__name__, "pytest_extra_output_test.lock"),
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(
        rule_runner,
        [tgt],
        extra_args=[
            "--pytest-args='--html=extra-output/report.html'",
            "--python-resolves={'pytest':'pytest.lock'}",
            "--pytest-install-from-resolve=pytest",
        ],
    )
    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py ." in result.stdout_simplified_str
    assert result.extra_output is not None
    digest_contents = rule_runner.request(DigestContents, [result.extra_output.digest])
    paths = {dc.path for dc in digest_contents}
    assert {"assets/style.css", "report.html"} == paths


def test_coverage(rule_runner: PythonRuleRunner) -> None:
    # Note that we test that rewriting the pyproject.toml doesn't cause a collision
    # between the two code paths by which we pick up that file (coverage and pytest).
    rule_runner.write_files(
        {
            "pyproject.toml": "[tool.coverage.report]\n[tool.pytest.ini_options]",
            f"{PACKAGE}/tests.py": GOOD_TEST,
            f"{PACKAGE}/BUILD": "python_tests()",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(rule_runner, [tgt], extra_args=["--test-use-coverage"])
    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py ." in result.stdout_simplified_str
    assert result.coverage_data is not None


def test_conftest_dependency_injection(rule_runner: PythonRuleRunner) -> None:
    # See `test_skip_tests` for a test that we properly skip running on conftest.py.
    rule_runner.write_files(
        {
            f"{SOURCE_ROOT}/conftest.py": dedent(
                """\
                def pytest_runtest_setup(item):
                    print('In conftest!')
                """
            ),
            f"{SOURCE_ROOT}/BUILD": "python_test_utils()",
            f"{PACKAGE}/tests.py": GOOD_TEST,
            f"{PACKAGE}/BUILD": "python_tests()",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(rule_runner, [tgt], extra_args=["--pytest-args='-s'"])
    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py In conftest!\n." in result.stdout_simplified_str


def test_execution_slot_variable(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/test_concurrency_slot.py": dedent(
                """\
                import os

                def test_fail_printing_slot_env_var():
                    slot = os.getenv("SLOT")
                    print(f"Value of slot is {slot}")
                    # Deliberately fail the test so the SLOT output gets printed to stdout
                    assert 1 == 2
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests()",
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="test_concurrency_slot.py"))
    result = run_pytest(rule_runner, [tgt], extra_args=["--pytest-execution-slot-var=SLOT"])
    assert result.exit_code == 1
    assert re.search(r"Value of slot is \d+", result.stdout_simplified_str)


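# Target-level `extra_env_vars` should take precedence over the option-level
# `--test-extra-env-vars` when the same variable is set in both places.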
def test_extra_env_vars(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/test_extra_env_vars.py": dedent(
                """\
                import os

                def test_args():
                    assert os.getenv("ARG_WITH_VALUE_VAR") == "arg_with_value_var"
                    assert os.getenv("ARG_WITHOUT_VALUE_VAR") == "arg_without_value_value"
                    assert os.getenv("PYTHON_TESTS_VAR_WITH_VALUE") == "python_tests_var_with_value"
                    assert os.getenv("PYTHON_TESTS_VAR_WITHOUT_VALUE") == "python_tests_var_without_value"
                    assert os.getenv("PYTHON_TESTS_OVERRIDE_WITH_VALUE_VAR") == "python_tests_override_with_value_var_override"
                """
            ),
            f"{PACKAGE}/BUILD": dedent(
                """\
                python_tests(
                    extra_env_vars=(
                        "PYTHON_TESTS_VAR_WITHOUT_VALUE",
                        "PYTHON_TESTS_VAR_WITH_VALUE=python_tests_var_with_value",
                        "PYTHON_TESTS_OVERRIDE_WITH_VALUE_VAR=python_tests_override_with_value_var_override",
                    )
                )
                """
            ),
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="test_extra_env_vars.py"))
    result = run_pytest(
        rule_runner,
        [tgt],
        extra_args=[
            "--test-extra-env-vars=['ARG_WITH_VALUE_VAR=arg_with_value_var', 'ARG_WITHOUT_VALUE_VAR', 'PYTHON_TESTS_OVERRIDE_ARG_WITH_VALUE_VAR']"
        ],
        env={
            "ARG_WITHOUT_VALUE_VAR": "arg_without_value_value",
            "PYTHON_TESTS_VAR_WITHOUT_VALUE": "python_tests_var_without_value",
            "PYTHON_TESTS_OVERRIDE_WITH_VALUE_VAR": "python_tests_override_with_value_var",
        },
    )
    assert result.exit_code == 0


def test_pytest_addopts_test_extra_env(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/test_pytest_addopts_test_extra_env.py": dedent(
                """\
                import os

                def test_addopts():
                    assert "-vv" in os.getenv("PYTEST_ADDOPTS")
                    assert "--maxfail=2" in os.getenv("PYTEST_ADDOPTS")
                """
            ),
            f"{PACKAGE}/BUILD": dedent(
                """\
                python_tests()
                """
            ),
        }
    )
    tgt = rule_runner.get_target(
        Address(PACKAGE, relative_file_path="test_pytest_addopts_test_extra_env.py")
    )
    result = run_pytest(
        rule_runner,
        [tgt],
        extra_args=[
            "--test-extra-env-vars=['PYTEST_ADDOPTS=-vv --maxfail=2']",
        ],
    )
    assert result.exit_code == 0


def test_pytest_addopts_field_set_extra_env(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/test_pytest_addopts_field_set_extra_env.py": dedent(
                """\
                import os

                def test_addopts():
                    assert "-vv" not in os.getenv("PYTEST_ADDOPTS")
                    assert "--maxfail=2" not in os.getenv("PYTEST_ADDOPTS")
                    assert "-ra" in os.getenv("PYTEST_ADDOPTS")
                    assert "-q" in os.getenv("PYTEST_ADDOPTS")
                """
            ),
            f"{PACKAGE}/BUILD": dedent(
                """\
                python_tests(
                    extra_env_vars=(
                        "PYTEST_ADDOPTS=-ra -q",
                    )
                )
                """
            ),
        }
    )
    tgt = rule_runner.get_target(
        Address(PACKAGE, relative_file_path="test_pytest_addopts_field_set_extra_env.py")
    )
    result = run_pytest(
        rule_runner,
        [tgt],
        extra_args=[
            "--test-extra-env-vars=['PYTEST_ADDOPTS=-vv --maxfail=2']",  # should be overridden by `python_tests`
        ],
    )
    assert result.exit_code == 0


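# Two `PytestPluginSetupRequest` implementations, registered via `UnionRule` in
# `test_setup_plugins_and_runtime_package_dependency` below; only the applicable
# one should contribute its digest and extra `sys.path` entries.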
class UsedPlugin(PytestPluginSetupRequest):
    @classmethod
    def is_applicable(cls, target: Target) -> bool:
        return True


class UnusedPlugin(PytestPluginSetupRequest):
    @classmethod
    def is_applicable(cls, target: Target) -> bool:
        return False


@rule
async def used_plugin(_: UsedPlugin) -> PytestPluginSetup:
    digest = await create_digest(CreateDigest([FileContent("used.txt", b"")]))
    return PytestPluginSetup(digest=digest, extra_sys_path=("sys/path/used",))


@rule
async def unused_plugin(_: UnusedPlugin) -> PytestPluginSetup:
    digest = await create_digest(CreateDigest([FileContent("unused.txt", b"")]))
    return PytestPluginSetup(digest=digest, extra_sys_path=("sys/path/unused",))


def test_setup_plugins_and_runtime_package_dependency(rule_runner: PythonRuleRunner) -> None:
    # We test both the generic `PytestPluginSetup` mechanism and our `runtime_package_dependencies`
    # feature in the same test to confirm multiple plugins can be used on the same target.
    rule_runner = PythonRuleRunner(
        rules=[
            *rule_runner.rules,
            used_plugin,
            unused_plugin,
            UnionRule(PytestPluginSetupRequest, UsedPlugin),
            UnionRule(PytestPluginSetupRequest, UnusedPlugin),
        ],
        target_types=rule_runner.target_types,
    )
    rule_runner.write_files(
        {
            f"{PACKAGE}/say_hello.py": "print('Hello, test!')",
            f"{PACKAGE}/test_binary_call.py": dedent(
                f"""\
                import os.path
                import subprocess
                import sys

                def test_embedded_binary():
                    assert os.path.exists("bin.pex")
                    assert b"Hello, test!" in subprocess.check_output(args=['./bin.pex'])

                    # Ensure that we didn't accidentally pull in the binary's sources. This is a
                    # special type of dependency that should not be included with the rest of the
                    # normal dependencies.
                    assert not os.path.exists("{PACKAGE}/say_hello.py")

                def test_additional_plugins_digest():
                    assert os.path.exists("used.txt")
                    assert not os.path.exists("unused.txt")

                def test_additional_plugins_extra_sys_path():
                    assert "sys/path/used" in sys.path
                    assert "sys/path/unused" not in sys.path
                """
            ),
            f"{PACKAGE}/BUILD": dedent(
                """\
                python_sources(name='bin_lib', sources=['say_hello.py'])
                pex_binary(name='bin', entry_point='say_hello.py', output_path="bin.pex")
                python_tests(runtime_package_dependencies=[':bin'])
                """
            ),
        }
    )
    tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="test_binary_call.py"))
    result = run_pytest(rule_runner, [tgt])
    assert result.exit_code == 0, f"pytest test failed:\n{result.stdout_bytes.decode()}"


def test_local_dists(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/foo/bar.py": "BAR = 'LOCAL DIST'",
            f"{PACKAGE}/foo/setup.py": dedent(
                """\
                from setuptools import setup

                setup(name="foo", version="9.8.7", packages=["foo"], package_dir={"foo": "."},)
                """
            ),
            f"{PACKAGE}/foo/bar_test.py": dedent(
                """
                from foo.bar import BAR

                def test_bar():
                  assert BAR == "LOCAL DIST"
                """
            ),
            f"{PACKAGE}/foo/BUILD": dedent(
                """\
                python_sources(name="lib")

                python_distribution(
                    name="dist",
                    dependencies=[":lib"],
                    provides=python_artifact(name="foo", version="9.8.7"),
                    sdist=False,
                    generate_setup=False,
                )

                # Force-exclude any dep on bar.py, so the only way to consume it is via the dist.
                python_tests(name="tests", sources=["bar_test.py"],
                             dependencies=[":dist", "!!:lib"])
                """
            ),
        }
    )
    tgt = rule_runner.get_target(
        Address(os.path.join(PACKAGE, "foo"), target_name="tests", relative_file_path="bar_test.py")
    )
    result = run_pytest(rule_runner, [tgt])
    assert result.exit_code == 0


def test_skip_tests(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            "test_skip_me.py": "",
            "test_foo.py": "",
            "BUILD": dedent(
                """\
                python_tests(name='t1', sources=['test_skip_me.py'], skip_tests=True)
                python_tests(name='t2', sources=['test_foo.py'])
                """
            ),
        }
    )

    def is_applicable(tgt_name: str, fp: str) -> bool:
        tgt = rule_runner.get_target(Address("", target_name=tgt_name, relative_file_path=fp))
        return PythonTestFieldSet.is_applicable(tgt)

    assert not is_applicable("t1", "test_skip_me.py")
    assert is_applicable("t2", "test_foo.py")


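# Asserts the exact argv of the debug-adapter process, down to the pex shim and
# the `--listen` address; `unittest.mock.ANY` matches a generated path that
# varies per run.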
def test_debug_adaptor_request_argv(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/test_foo.py": "",
            f"{PACKAGE}/BUILD": dedent(
                """\
                python_tests(name='tests')
                """
            ),
        }
    )

    args = [
        "--backend-packages=pants.backend.python",
        f"--source-root-patterns={SOURCE_ROOT}",
    ]
    rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    tgt = rule_runner.get_target(
        Address(PACKAGE, target_name="tests", relative_file_path="test_foo.py")
    )
    request = rule_runner.request(TestDebugAdapterRequest, [_get_pytest_batch(rule_runner, [tgt])])
    assert request.process is not None
    assert request.process.process.argv == (
        "./pytest_runner.pex_pex_shim.sh",
        "--listen",
        "127.0.0.1:5678",
        "-c",
        unittest.mock.ANY,
        "--color=yes",
        "tests/python/pants_test/test_foo.py",
    )


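# Each case pairs root- and package-level BUILD contents with the partitions
# (sorted lists of test files) they should produce.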
@pytest.mark.parametrize(
    "root_build_contents,package_build_contents,expected_partitions",
    (
        # No batching by default:
        [
            "",
            "python_tests()",
            [[f"{PACKAGE}/test_1.py"], [f"{PACKAGE}/test_2.py"], [f"{PACKAGE}/test_3.py"]],
        ],
        # Compatibility at the `python_tests` level:
        [
            "",
            "python_tests(batch_compatibility_tag='default')",
            [[f"{PACKAGE}/test_1.py", f"{PACKAGE}/test_2.py", f"{PACKAGE}/test_3.py"]],
        ],
        # Compatibility at a higher level via `__defaults__`:
        [
            "__defaults__(dict(python_tests=dict(batch_compatibility_tag='default')))",
            "python_tests()",
            [[f"{PACKAGE}/test_1.py", f"{PACKAGE}/test_2.py", f"{PACKAGE}/test_3.py"]],
        ],
        # Overriding compatibility from a higher __defaults__:
        [
            "__defaults__(dict(python_tests=dict(batch_compatibility_tag='default')))",
            "python_tests(overrides={'test_2.py': {'batch_compatibility_tag': 'other'}})",
            [[f"{PACKAGE}/test_1.py", f"{PACKAGE}/test_3.py"], [f"{PACKAGE}/test_2.py"]],
        ],
        # Partition on incompatible BUILD metadata:
        [
            "__defaults__(dict(python_tests=dict(batch_compatibility_tag='default', extra_env_vars=['HOME'])))",
            "python_tests(overrides={'test_2.py': {'extra_env_vars': []}})",
            [[f"{PACKAGE}/test_1.py", f"{PACKAGE}/test_3.py"], [f"{PACKAGE}/test_2.py"]],
        ],
        # Order of extra_env_vars shouldn't affect partitioning:
        [
            "__defaults__(dict(python_tests=dict(batch_compatibility_tag='default', extra_env_vars=['FOO', 'BAR'])))",
            "python_tests(overrides={'test_2.py': {'extra_env_vars': ['BAR', 'FOO']}})",
            [[f"{PACKAGE}/test_1.py", f"{PACKAGE}/test_2.py", f"{PACKAGE}/test_3.py"]],
        ],
        # Partition on different environments:
        [
            "__defaults__(dict(python_tests=dict(batch_compatibility_tag='default')))",
            "python_tests(overrides={'test_2.py': {'environment': 'remote'}})",
            [[f"{PACKAGE}/test_1.py", f"{PACKAGE}/test_3.py"], [f"{PACKAGE}/test_2.py"]],
        ],
    ),
)
def test_partition(
    rule_runner: PythonRuleRunner,
    root_build_contents: str,
    package_build_contents: str,
    expected_partitions: list[list[str]],
) -> None:
    _configure_pytest_runner(rule_runner)
    rule_runner.write_files(
        {
            "BUILD": root_build_contents,
            f"{PACKAGE}/test_1.py": dedent(
                """\
                def test():
                    assert 1 == 1
                """
            ),
            f"{PACKAGE}/test_2.py": dedent(
                """\
                def test():
                    assert 2 == 2
                """
            ),
            f"{PACKAGE}/test_3.py": dedent(
                """\
                def test():
                    assert 3 == 3
                """
            ),
            f"{PACKAGE}/BUILD": package_build_contents,
        }
    )

    field_sets = tuple(
        PythonTestFieldSet.create(rule_runner.get_target(Address(PACKAGE, relative_file_path=path)))
        for path in ("test_1.py", "test_2.py", "test_3.py")
    )

    partitions = rule_runner.request(
        Partitions[PythonTestFieldSet, TestMetadata], [PyTestRequest.PartitionRequest(field_sets)]
    )
    sorted_partitions = sorted(
        sorted(field_set.address.spec for field_set in partition.elements)
        for partition in partitions
    )

    assert sorted_partitions == expected_partitions


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(["CPython>=3.9,<4"]),
)
def test_batched_passing(rule_runner: PythonRuleRunner, major_minor_interpreter: str) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/test_1.py": GOOD_TEST,
            f"{PACKAGE}/test_2.py": GOOD_TEST,
            f"{PACKAGE}/BUILD": "python_tests(batch_compatibility_tag='default')",
        }
    )
    targets = tuple(
        rule_runner.get_target(Address(PACKAGE, relative_file_path=path))
        for path in ("test_1.py", "test_2.py")
    )
    result = run_pytest(
        rule_runner,
        targets,
        extra_args=[f"--python-interpreter-constraints=['=={major_minor_interpreter}.*']"],
    )
    assert result.xml_results is not None
    assert result.exit_code == 0
    stdout_text = result.stdout_simplified_str
    assert f"{PACKAGE}/test_1.py ." in stdout_text
    assert f"{PACKAGE}/test_2.py ." in stdout_text


def test_batched_failing(rule_runner: PythonRuleRunner) -> None:
    rule_runner.write_files(
        {
            f"{PACKAGE}/test_1.py": GOOD_TEST,
            f"{PACKAGE}/test_2.py": dedent(
                """\
                def test():
                    assert False
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests(batch_compatibility_tag='default')",
        }
    )
    targets = tuple(
        rule_runner.get_target(Address(PACKAGE, relative_file_path=path))
        for path in ("test_1.py", "test_2.py")
    )
    result = run_pytest(rule_runner, targets)
    assert result.exit_code == 1
    stdout_text = result.stdout_simplified_str
    assert f"{PACKAGE}/test_1.py ." in stdout_text
    assert f"{PACKAGE}/test_2.py F" in stdout_text