• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

pantsbuild / pants / 24055979590

06 Apr 2026 11:17PM UTC coverage: 52.37% (-40.5%) from 92.908%
24055979590

Pull #23225

github

web-flow
Merge 67474653c into 542ca048d
Pull Request #23225: Add --test-show-all-batch-targets to expose all targets in batched pytest

6 of 17 new or added lines in 2 files covered. (35.29%)

23030 existing lines in 605 files now uncovered.

31643 of 60422 relevant lines covered (52.37%)

1.05 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

69.43
/src/python/pants/core/goals/fix.py
1
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
2
# Licensed under the Apache License, Version 2.0 (see LICENSE).
3

4
from __future__ import annotations
2✔
5

6
import itertools
2✔
7
import logging
2✔
8
from collections import defaultdict
2✔
9
from collections.abc import Callable, Coroutine, Iterable, Iterator, Sequence
2✔
10
from dataclasses import dataclass
2✔
11
from typing import Any, ClassVar, NamedTuple, Protocol, TypeVar
2✔
12

13
from pants.base.specs import Specs
2✔
14
from pants.core.goals.lint import (
2✔
15
    AbstractLintRequest,
16
    LintFilesRequest,
17
    LintResult,
18
    LintTargetsRequest,
19
    _MultiToolGoalSubsystem,
20
    get_partitions_by_request_type,
21
)
22
from pants.core.goals.multi_tool_goal_helper import BatchSizeOption, OnlyOption
2✔
23
from pants.core.util_rules.partitions import PartitionerType, PartitionMetadataT
2✔
24
from pants.core.util_rules.partitions import Partitions as UntypedPartitions
2✔
25
from pants.engine.collection import Collection
2✔
26
from pants.engine.console import Console
2✔
27
from pants.engine.engine_aware import EngineAwareReturnType
2✔
28
from pants.engine.environment import EnvironmentName
2✔
29
from pants.engine.fs import MergeDigests, PathGlobs, Snapshot, SnapshotDiff, Workspace
2✔
30
from pants.engine.goal import Goal, GoalSubsystem
2✔
31
from pants.engine.intrinsics import digest_to_snapshot, merge_digests
2✔
32
from pants.engine.process import FallibleProcessResult, ProcessResult
2✔
33
from pants.engine.rules import collect_rules, concurrently, goal_rule, implicitly, rule
2✔
34
from pants.engine.unions import UnionMembership, UnionRule, distinct_union_type_per_subclass, union
2✔
35
from pants.option.option_types import BoolOption
2✔
36
from pants.util.collections import partition_sequentially
2✔
37
from pants.util.docutil import bin_name, doc_url
2✔
38
from pants.util.logging import LogLevel
2✔
39
from pants.util.ordered_set import FrozenOrderedSet
2✔
40
from pants.util.strutil import Simplifier, softwrap
2✔
41

42
# Module-level logger, one per module per the stdlib logging convention.
logger = logging.getLogger(__name__)
43

44

45
@dataclass(frozen=True)
class FixResult(EngineAwareReturnType):
    """The outcome of running one fixer over one batch of files.

    Holds the before/after snapshots plus the tool's captured output, so the
    engine can render a summary of what (if anything) changed.
    """

    input: Snapshot
    output: Snapshot
    stdout: str
    stderr: str
    tool_name: str

    @staticmethod
    async def create(
        request: AbstractFixRequest.Batch,
        process_result: ProcessResult | FallibleProcessResult,
        *,
        output_simplifier: Simplifier = Simplifier(),
    ) -> FixResult:
        """Build a `FixResult` from the process that ran the tool on `request`."""
        output_snapshot = await digest_to_snapshot(process_result.output_digest)
        return FixResult(
            input=request.snapshot,
            output=output_snapshot,
            stdout=output_simplifier.simplify(process_result.stdout),
            stderr=output_simplifier.simplify(process_result.stderr),
            tool_name=request.tool_name,
        )

    def __post_init__(self):
        # NB: We debug log stdout/stderr because `message` doesn't log it.
        pieces = [f"Output from {self.tool_name}"]
        if self.stdout:
            pieces.append(self.stdout)
        if self.stderr:
            pieces.append(self.stderr)
        logger.debug("\n".join(pieces))

    @property
    def did_change(self) -> bool:
        # The tool made changes iff the output snapshot differs from the input.
        return self.output != self.input

    def level(self) -> LogLevel | None:
        # Surface edits prominently; an unchanged run is only informational.
        if self.did_change:
            return LogLevel.WARN
        return LogLevel.INFO

    def message(self) -> str | None:
        verdict = "made changes." if self.did_change else "made no changes."

        # NB: Instead of printing out `stdout` and `stderr`, we just print a list of files which
        # were changed/added/removed. We do this for two reasons:
        #   1. This is run as part of both `fmt`/`fix` and `lint`, and we want consistent output between both
        #   2. Different tools have different stdout/stderr. This way is consistent across all tools.
        file_list = ""
        if self.did_change:
            diff = SnapshotDiff.from_snapshots(self.input, self.output)
            # NB: there is no rename detection, so a rename will list
            # both the old filename (removed) and the new filename (added).
            touched = itertools.chain(
                diff.changed_files,
                diff.their_unique_files,  # added files
                diff.our_unique_files,  # removed files
            )
            file_list = "".join(f"\n  {path}" for path in touched)

        return f"{self.tool_name} {verdict}{file_list}"

    def cacheable(self) -> bool:
        """Is marked uncacheable to ensure that it always renders."""
        return False
2✔
111

112

113
# Public alias: `fix` partitions always key their elements by file path (str).
Partitions = UntypedPartitions[str, PartitionMetadataT]
114

115

116
@union
class AbstractFixRequest(AbstractLintRequest):
    """Base union request for tools that edit files in place.

    Subclasses register themselves (and their `Batch` type) via `_get_rules`,
    which is how the `fix` goal discovers them.
    """

    is_fixer = True

    # Enable support for re-using this request's rule in `lint`, where the success/failure of the linter corresponds to
    # whether the rule's output matches the input (i.e. whether the tool made changes or not).
    #
    # If you set this to `False`, you'll need to provide the following `UnionRule` with a custom class,
    # as well as their corresponding implementation rules:
    #   - `UnionRule(AbstractLintRequest, cls)`
    #   - `UnionRule(AbstractLintRequest.Batch, cls)`
    #
    # !!! Setting this to `False` should be exceedingly rare, as the default implementation handles two important things:
    #   - Re-use of the exact same process in `fix` as in `lint`, so runs like `pants fix lint` use
    #     cached/memoized results in `lint`. This pattern is commonly used by developers locally.
    #   - Ensuring that `pants lint` is checking that the file(s) are actually fixed. It's easy to forget to provide the
    #     `lint` implementation (which is used usually in CI, as opposed to `fix`), which allows files to be merged
    #     into the default branch un-fixed. (Fun fact, this happened in the Pants codebase before this inheritance existed
    #     and was the catalyst for this design).
    # The case for disabling this is when the `fix` implementation fixes a strict subset of some `lint` implementation, where
    # the check for is-this-fixed in the `lint` implementation isn't possible.
    # As an example, let's say tool `cruft` has `cruft lint` which lints for A, B and C. It also has `cruft lint --fix` which fixes A.
    # There's no way to not check for `A` in `cruft lint`. Since you're already going to provide a `lint` implementation
    # which corresponds to `cruft lint`, there's no point in running `cruft lint --fix` in `lint` as it's already covered by
    # `cruft lint`.
    enable_lint_rules: ClassVar[bool] = True

    @distinct_union_type_per_subclass(in_scope_types=[EnvironmentName])
    @dataclass(frozen=True)
    class Batch(AbstractLintRequest.Batch):
        """One unit of work for a fixer: a set of files plus their snapshot."""

        snapshot: Snapshot

        @property
        def files(self) -> tuple[str, ...]:
            # Deduplicate while preserving the original element order.
            return tuple(FrozenOrderedSet(self.elements))

    @classmethod
    def _get_rules(cls) -> Iterable[UnionRule]:
        # Optionally also register with `lint` (see the comment on
        # `enable_lint_rules` above), then always register with `fix`.
        if cls.enable_lint_rules:
            yield from super()._get_rules()
        yield UnionRule(AbstractFixRequest, cls)
        yield UnionRule(AbstractFixRequest.Batch, cls.Batch)
158

159

160
class FixTargetsRequest(AbstractFixRequest, LintTargetsRequest):
    """A fix request whose batches are computed from targets."""

    @classmethod
    def _get_rules(cls) -> Iterable:
        # Register the per-file default partitioner for this request type.
        yield from cls.partitioner_type.default_rules(cls, by_file=True)
        # NB: We don't want to yield `lint.py`'s default partitioner, so only
        # the `UnionRule`s from the parent classes are forwarded.
        for parent_rule in super()._get_rules():
            if isinstance(parent_rule, UnionRule):
                yield parent_rule
        yield UnionRule(FixTargetsRequest.PartitionRequest, cls.PartitionRequest)
171

172

173
class FixFilesRequest(AbstractFixRequest, LintFilesRequest):
    """A fix request whose batches are computed from raw file paths."""

    @classmethod
    def _get_rules(cls) -> Iterable:
        # Only custom partitioners are supported for file-based fixers.
        if cls.partitioner_type is PartitionerType.CUSTOM:
            yield from super()._get_rules()
            yield UnionRule(FixFilesRequest.PartitionRequest, cls.PartitionRequest)
            return
        raise ValueError(
            "Pants does not provide default partitioners for `FixFilesRequest`."
            " You will need to provide your own partitioner rule."
        )
184

185

186
class _FixBatchElement(NamedTuple):
    """One (tool, files) unit within a sequential fix batch."""

    # The tool-specific `Batch` union member to instantiate.
    request_type: type[AbstractFixRequest.Batch]
    # Human-readable tool name, used in logs and the final summary.
    tool_name: str
    # The files this tool should fix.
    files: tuple[str, ...]
    # Partition metadata, passed through to the tool's `Batch`.
    key: Any
191

192

193
class _FixBatchRequest(Collection[_FixBatchElement]):
    """Request to sequentially fix all the elements in the given batch.

    Sequential execution lets each tool see the previous tool's edits, so
    changes compose instead of conflicting (see `fix_batch_sequential`).
    """
195

196

197
@dataclass(frozen=True)
class _FixBatchResult:
    """The per-tool results of sequentially fixing one batch of files."""

    results: tuple[FixResult, ...]

    @property
    def did_change(self) -> bool:
        # True if at least one tool in the batch edited a file.
        for result in self.results:
            if result.did_change:
                return True
        return False
204

205

206
class FixSubsystem(GoalSubsystem):
    # Options for the `fix` goal.
    name = "fix"
    # NB: the `fmt` bullet previously linked to `reference/goals/fix` and was
    # missing the markdown link's closing paren; it now points at the `fmt` page.
    help = softwrap(
        f"""
        Autofix source code.

        This goal runs tools that make 'semantic' changes to source code, where the meaning of the
        code may change.

        See also:

        - [The `fmt` goal]({doc_url("reference/goals/fmt")}) will run code-editing tools that may make only
          syntactic changes, not semantic ones. The `fix` includes running these `fmt` tools by
          default (see [the `skip_formatters` option](#skip_formatters) to control this).

        - [The `lint` goal]({doc_url("reference/goals/lint")}) will validate code is formatted, by running these
          fixers and checking there's no change.

        - Documentation about formatters for various ecosystems, such as:
          [Python]({doc_url("docs/python/overview/linters-and-formatters")}), [JVM]({doc_url("jvm/java-and-scala#lint-and-format")}),
          [SQL]({doc_url("docs/sql#enable-sqlfluff-linter")})
        """
    )

    @classmethod
    def activated(cls, union_membership: UnionMembership) -> bool:
        # The goal is only available when at least one fixer is registered.
        return AbstractFixRequest in union_membership

    only = OnlyOption("fixer", "autoflake", "pyupgrade")
    skip_formatters = BoolOption(
        default=False,
        help=softwrap(
            f"""
            If true, skip running all formatters.

            FYI: when running `{bin_name()} fix fmt ::`, there should be diminishing performance
            benefit to using this flag. Pants attempts to reuse the results from `fmt` when running
            `fix` where possible.
            """
        ),
    )
    batch_size = BatchSizeOption(uppercase="Fixer", lowercase="fixer")
248

249

250
class Fix(Goal):
    # Goal marker for `pants fix`; options come from `FixSubsystem`.
    subsystem_cls = FixSubsystem
    # Fix results are written back into the local workspace, hence local-only.
    environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
2✔
253

254

255
async def _write_files(workspace: Workspace, batched_results: Iterable[_FixBatchResult]):
    """Merge every batch's final output digest and write it to the workspace.

    No-op when no tool changed any file.
    """
    # Materialize first: `batched_results` is typed as an Iterable and we
    # iterate it twice (once for the change check, once to collect digests);
    # a lazy iterable would otherwise be exhausted by the first pass.
    all_results = list(batched_results)
    if not any(batched_result.did_change for batched_result in all_results):
        return
    # NB: this will fail if there are any conflicting changes, which we want to happen rather
    # than silently having one result override the other. In practice, this should never
    # happen due to us grouping each file's tools into a single digest.
    merged_digest = await merge_digests(
        MergeDigests(
            # The last result in each batch holds the cumulative output of
            # every tool that ran on those files.
            batched_result.results[-1].output.digest
            for batched_result in all_results
        )
    )
    workspace.write_digest(merged_digest)
266

267

268
def _print_results(
    console: Console,
    results: Iterable[FixResult],
):
    """Print a one-line, per-tool summary of all fix results to stderr."""
    # Materialize: `results` may be a lazy iterable, and we both test it for
    # emptiness and iterate it below. (`if results:` on a generator object is
    # always truthy, even when the generator yields nothing.)
    all_results = list(results)
    if not all_results:
        return

    console.print_stderr("")

    # We group all results for the same tool so that we can give one final status in the
    # summary. This is only relevant if there were multiple results because of
    # `--per-file-caching`.
    tool_to_results = defaultdict(set)
    for result in all_results:
        tool_to_results[result.tool_name].add(result)

    # NB: `tool_results` (not `results`) to avoid shadowing the parameter.
    for tool, tool_results in sorted(tool_to_results.items()):
        if any(result.did_change for result in tool_results):
            sigil = console.sigil_succeeded_with_edits()
            status = "made changes"
        else:
            sigil = console.sigil_succeeded()
            status = "made no changes"
        console.print_stderr(f"{sigil} {tool} {status}.")
290

291

292
# Type variables for `_do_fix`, keeping it generic over the concrete request,
# partitioner, and goal types so the implementation can be reused by other
# goals (the subsystem/goal are passed in as parameters).
_CoreRequestType = TypeVar("_CoreRequestType", bound=AbstractFixRequest)
_TargetPartitioner = TypeVar("_TargetPartitioner", bound=FixTargetsRequest.PartitionRequest)
_FilePartitioner = TypeVar("_FilePartitioner", bound=FixFilesRequest.PartitionRequest)
_GoalT = TypeVar("_GoalT", bound=Goal)
296

297

298
class _BatchableMultiToolGoalSubsystem(_MultiToolGoalSubsystem, Protocol):
    """Structural type accepted by `_do_fix`: a multi-tool goal subsystem that
    additionally exposes a batch-size option."""

    batch_size: BatchSizeOption
300

301

302
@rule(polymorphic=True)
async def fix_batch(batch: AbstractFixRequest.Batch) -> FixResult:
    """Polymorphic stub: the engine dispatches to the tool-specific `Batch` rule
    registered via the union; this body is never executed."""
    raise NotImplementedError()
305

306

307
@rule
async def fix_batch_sequential(
    request: _FixBatchRequest,
) -> _FixBatchResult:
    """Run each tool in the batch in order, threading one tool's output snapshot
    into the next tool's input so edits compose rather than conflict."""
    # Seed from the first element's files; every element in a `_FixBatchRequest`
    # covers the same file set (they are grouped that way in `_do_fix`).
    current_snapshot = await digest_to_snapshot(
        **implicitly({PathGlobs(request[0].files): PathGlobs})
    )

    results = []
    for request_type, tool_name, files, key in request:
        batch = request_type(tool_name, files, key, current_snapshot)
        result = await fix_batch(**implicitly({batch: AbstractFixRequest.Batch}))
        results.append(result)

        # A tool must only rewrite the files it was given, never add or remove
        # any — otherwise the sequential threading below would be unsound.
        assert set(result.output.files) == set(batch.files), (
            f"Expected {result.output.files} to match {batch.files}"
        )
        # Feed this tool's output to the next tool in the batch.
        current_snapshot = result.output
    return _FixBatchResult(tuple(results))
326

327

328
async def _do_fix(
    core_request_types: Iterable[type[_CoreRequestType]],
    target_partitioners: Iterable[type[_TargetPartitioner]],
    file_partitioners: Iterable[type[_FilePartitioner]],
    goal_cls: type[_GoalT],
    subsystem: _BatchableMultiToolGoalSubsystem,
    specs: Specs,
    workspace: Workspace,
    console: Console,
    make_targets_partition_request_get: Callable[
        [_TargetPartitioner], Coroutine[Any, Any, Partitions]
    ],
    make_files_partition_request_get: Callable[[_FilePartitioner], Coroutine[Any, Any, Partitions]],
) -> _GoalT:
    """Core implementation shared via parameters: partition the input files per
    tool, build disjoint batches, run each batch's tools sequentially (batches
    run concurrently), write the merged edits back, and print a summary.

    Returns `goal_cls(exit_code=0)` — see the note at the bottom on why
    failures are not expected here.
    """
    partitions_by_request_type = await get_partitions_by_request_type(
        core_request_types,
        target_partitioners,
        file_partitioners,
        subsystem,
        specs,
        make_targets_partition_request_get,
        make_files_partition_request_get,
    )

    # Nothing matched the specs / `--only` filters: trivially succeed.
    if not partitions_by_request_type:
        return goal_cls(exit_code=0)

    def batch_by_size(files: Iterable[str]) -> Iterator[tuple[str, ...]]:
        # Split a file list into stable, size-bounded batches per the
        # `--batch-size` option.
        batches = partition_sequentially(
            files,
            key=lambda x: str(x),
            size_target=subsystem.batch_size,  # type: ignore[arg-type]
            size_max=4 * subsystem.batch_size,  # type: ignore[operator]
        )
        for batch in batches:
            yield tuple(batch)

    def _make_disjoint_batch_requests() -> Iterable[_FixBatchRequest]:
        # Build batch requests such that no file appears in more than one
        # request: files are grouped by the exact set of (tool, metadata)
        # pairs that apply to them, so each file's tools run sequentially
        # within a single batch and batches never conflict.
        partition_infos: Iterable[tuple[type[AbstractFixRequest], Any]]
        files: Sequence[str]

        # file -> every (request type, partition metadata) that covers it.
        partition_infos_by_files = defaultdict(list)
        for request_type, partitions_list in partitions_by_request_type.items():
            for partitions in partitions_list:
                for partition in partitions:
                    for file in partition.elements:
                        partition_infos_by_files[file].append((request_type, partition.metadata))

        # Invert: identical (tool, metadata) sets share one group of files.
        files_by_partition_info = defaultdict(list)
        for file, partition_infos in partition_infos_by_files.items():
            deduped_partition_infos = FrozenOrderedSet(partition_infos)
            files_by_partition_info[deduped_partition_infos].append(file)

        for partition_infos, files in files_by_partition_info.items():
            for batch in batch_by_size(files):
                yield _FixBatchRequest(
                    _FixBatchElement(
                        request_type.Batch,
                        request_type.tool_name,
                        batch,
                        partition_metadata,
                    )
                    for request_type, partition_metadata in partition_infos
                )

    # Batches are independent (disjoint files), so they can run concurrently;
    # tools within a batch run sequentially (see `fix_batch_sequential`).
    all_results = await concurrently(
        fix_batch_sequential(request) for request in _make_disjoint_batch_requests()
    )

    # Flatten to per-tool results for the summary output.
    individual_results = list(
        itertools.chain.from_iterable(result.results for result in all_results)
    )

    await _write_files(workspace, all_results)
    _print_results(console, individual_results)

    # Since the rules to produce FixResult should use ProcessResult, rather than
    # FallibleProcessResult, we assume that there were no failures.
    return goal_cls(exit_code=0)
407

408

409
@rule(polymorphic=True)
async def partition_targets(req: FixTargetsRequest.PartitionRequest) -> Partitions:
    """Polymorphic stub: dispatched to the fixer-specific target partitioner
    registered via the union; this body is never executed."""
    raise NotImplementedError()
412

413

414
@rule(polymorphic=True)
async def partition_files(req: FixFilesRequest.PartitionRequest) -> Partitions:
    """Polymorphic stub: dispatched to the fixer-specific file partitioner
    registered via the union; this body is never executed."""
    raise NotImplementedError()
417

418

419
@goal_rule
async def fix(
    console: Console,
    specs: Specs,
    fix_subsystem: FixSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fix:
    """Entry point for `pants fix`: gather the registered fixer request types
    (optionally excluding formatters) and delegate to `_do_fix`."""
    return await _do_fix(
        sorted(
            (
                request_type
                for request_type in union_membership.get(AbstractFixRequest)
                # `--skip-formatters` drops formatter-backed requests entirely.
                if not (request_type.is_formatter and fix_subsystem.skip_formatters)
            ),
            # NB: We sort the core request types so that fixers are first. This is to ensure that, between
            # fixers and formatters, re-running isn't necessary due to tool conflicts (re-running may
            # still be necessary within formatters). This is because fixers are expected to modify
            # code irrespective of formatting, and formatters aren't expected to be modifying the code
            # in a way that needs to be fixed.
            key=lambda request_type: request_type.is_fixer,
            reverse=True,
        ),
        union_membership.get(FixTargetsRequest.PartitionRequest),
        union_membership.get(FixFilesRequest.PartitionRequest),
        Fix,
        fix_subsystem,  # type: ignore[arg-type]
        specs,
        workspace,
        console,
        # Adapters that invoke the polymorphic partitioner rules for `_do_fix`.
        lambda request_type: partition_targets(
            **implicitly({request_type: FixTargetsRequest.PartitionRequest})
        ),
        lambda request_type: partition_files(
            **implicitly({request_type: FixFilesRequest.PartitionRequest})
        ),
    )
456

457

458
@rule(level=LogLevel.DEBUG)
async def convert_fix_result_to_lint_result(fix_result: FixResult) -> LintResult:
    """Adapt a fixer's result for `lint`: any change means the check fails."""
    exit_code = 1 if fix_result.did_change else 0
    return LintResult(
        exit_code,
        fix_result.stdout,
        fix_result.stderr,
        linter_name=fix_result.tool_name,
        _render_message=False,  # Don't re-render the message
    )
467

468

469
def rules():
    """Return this module's rules for registration with the engine."""
    return collect_rules()
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc