• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

pantsbuild / pants / 22507851448

27 Feb 2026 11:28PM UTC coverage: 92.928% (-0.007%) from 92.935%
22507851448

push

github

web-flow
silence new HdrHistogram induced deprecation warning on Python 3.14 (#23144)

No more:
```
/usr/lib/python3.14/ctypes/_endian.py:33: DeprecationWarning: Due to '_pack_', the
'ExternalHeader' Structure will use memory layout compatible with MSVC (Windows). If this is intended, set _layout_ to 'ms'. The
 implicit default is deprecated and slated to become an error in Python 3.19.
  super().__setattr__(attrname, value)
/usr/lib/python3.14/ctypes/_endian.py:33: DeprecationWarning: Due to '_pack_', the 'PayloadHeader' Structure will use memory
layout compatible with MSVC (Windows). If this is intended, set _layout_ to 'ms'. The implicit default is deprecated and slated
to become an error in Python 3.19.
  super().__setattr__(attrname, value)
```

0 of 1 new or added line in 1 file covered. (0.0%)

71 existing lines in 12 files now uncovered.

90912 of 97831 relevant lines covered (92.93%)

4.06 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.41
/src/python/pants/core/goals/fix.py
1
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
2
# Licensed under the Apache License, Version 2.0 (see LICENSE).
3

4
from __future__ import annotations
12✔
5

6
import itertools
12✔
7
import logging
12✔
8
from collections import defaultdict
12✔
9
from collections.abc import Callable, Coroutine, Iterable, Iterator, Sequence
12✔
10
from dataclasses import dataclass
12✔
11
from typing import Any, ClassVar, NamedTuple, Protocol, TypeVar
12✔
12

13
from pants.base.specs import Specs
12✔
14
from pants.core.goals.lint import (
12✔
15
    AbstractLintRequest,
16
    LintFilesRequest,
17
    LintResult,
18
    LintTargetsRequest,
19
    _MultiToolGoalSubsystem,
20
    get_partitions_by_request_type,
21
)
22
from pants.core.goals.multi_tool_goal_helper import BatchSizeOption, OnlyOption
12✔
23
from pants.core.util_rules.partitions import PartitionerType, PartitionMetadataT
12✔
24
from pants.core.util_rules.partitions import Partitions as UntypedPartitions
12✔
25
from pants.engine.collection import Collection
12✔
26
from pants.engine.console import Console
12✔
27
from pants.engine.engine_aware import EngineAwareReturnType
12✔
28
from pants.engine.environment import EnvironmentName
12✔
29
from pants.engine.fs import MergeDigests, PathGlobs, Snapshot, SnapshotDiff, Workspace
12✔
30
from pants.engine.goal import Goal, GoalSubsystem
12✔
31
from pants.engine.intrinsics import digest_to_snapshot, merge_digests
12✔
32
from pants.engine.process import FallibleProcessResult, ProcessResult
12✔
33
from pants.engine.rules import collect_rules, concurrently, goal_rule, implicitly, rule
12✔
34
from pants.engine.unions import UnionMembership, UnionRule, distinct_union_type_per_subclass, union
12✔
35
from pants.option.option_types import BoolOption
12✔
36
from pants.util.collections import partition_sequentially
12✔
37
from pants.util.docutil import bin_name, doc_url
12✔
38
from pants.util.logging import LogLevel
12✔
39
from pants.util.ordered_set import FrozenOrderedSet
12✔
40
from pants.util.strutil import Simplifier, softwrap
12✔
41

42
# Module-level logger, following the one-logger-per-module stdlib convention.
logger = logging.getLogger(__name__)
43

44

45
@dataclass(frozen=True)
class FixResult(EngineAwareReturnType):
    """The outcome of running one fixer tool over a batch of files.

    `input` and `output` are the snapshots from before and after the tool ran;
    comparing them tells whether the tool edited anything.
    """

    input: Snapshot
    output: Snapshot
    stdout: str
    stderr: str
    tool_name: str

    @staticmethod
    async def create(
        request: AbstractFixRequest.Batch,
        process_result: ProcessResult | FallibleProcessResult,
        *,
        output_simplifier: Simplifier = Simplifier(),
    ) -> FixResult:
        """Build a `FixResult` from a finished tool process, simplifying its output."""
        output_snapshot = await digest_to_snapshot(process_result.output_digest)
        return FixResult(
            input=request.snapshot,
            output=output_snapshot,
            stdout=output_simplifier.simplify(process_result.stdout),
            stderr=output_simplifier.simplify(process_result.stderr),
            tool_name=request.tool_name,
        )

    def __post_init__(self):
        # NB: We debug log stdout/stderr because `message` doesn't log it.
        parts = [f"Output from {self.tool_name}"]
        if self.stdout:
            parts.append(self.stdout)
        if self.stderr:
            parts.append(self.stderr)
        logger.debug("\n".join(parts))

    @property
    def did_change(self) -> bool:
        return self.output != self.input

    def level(self) -> LogLevel | None:
        # Edits deserve attention in the log, so surface them at WARN.
        if self.did_change:
            return LogLevel.WARN
        return LogLevel.INFO

    def message(self) -> str | None:
        # NB: Instead of printing out `stdout` and `stderr`, we just print a list of files which
        # were changed/added/removed. We do this for two reasons:
        #   1. This is run as part of both `fmt`/`fix` and `lint`, and we want consistent output between both
        #   2. Different tools have different stdout/stderr. This way is consistent across all tools.
        output = ""
        if self.did_change:
            snapshot_diff = SnapshotDiff.from_snapshots(self.input, self.output)
            # NB: there is no rename detection, so a rename will list both the
            # old filename (removed) and the new filename (added).
            touched_files = itertools.chain(
                snapshot_diff.changed_files,
                snapshot_diff.their_unique_files,  # added files
                snapshot_diff.our_unique_files,  # removed files
            )
            output = "".join(f"\n  {file}" for file in touched_files)

        verb = "made changes." if self.did_change else "made no changes."
        return f"{self.tool_name} {verb}{output}"

    def cacheable(self) -> bool:
        """Is marked uncacheable to ensure that it always renders."""
        return False
111

112

113
# Convenience alias: fix partitions always map str file paths to partition metadata.
Partitions = UntypedPartitions[str, PartitionMetadataT]
114

115

116
@union
class AbstractFixRequest(AbstractLintRequest):
    """Base union for tools that can fix (rewrite) source files in place."""

    is_fixer = True

    # Enable support for re-using this request's rule in `lint`, where the success/failure of the
    # linter corresponds to whether the rule's output matches the input (i.e. whether the tool made
    # changes or not).
    #
    # If you set this to `False`, you'll need to provide the following `UnionRule`s with a custom
    # class, as well as their corresponding implementation rules:
    #   - `UnionRule(AbstractLintRequest, cls)`
    #   - `UnionRule(AbstractLintRequest.Batch, cls)`
    #
    # !!! Setting this to `False` should be exceedingly rare, as the default implementation handles
    # two important things:
    #   - Re-use of the exact same process in `fix` as in `lint`, so runs like `pants fix lint` use
    #     cached/memoized results in `lint`. This pattern is commonly used by developers locally.
    #   - Ensuring that `pants lint` is checking that the file(s) are actually fixed. It's easy to
    #     forget to provide the `lint` implementation (which is used usually in CI, as opposed to
    #     `fix`), which allows files to be merged into the default branch un-fixed. (Fun fact, this
    #     happened in the Pants codebase before this inheritance existed and was the catalyst for
    #     this design.)
    #
    # The case for disabling this is when the `fix` implementation fixes a strict subset of some
    # `lint` implementation, where the check for is-this-fixed in the `lint` implementation isn't
    # possible. As an example, let's say tool `cruft` has `cruft lint` which lints for A, B and C.
    # It also has `cruft lint --fix` which fixes A. There's no way to not check for `A` in
    # `cruft lint`. Since you're already going to provide a `lint` implementation which corresponds
    # to `cruft lint`, there's no point in running `cruft lint --fix` in `lint` as it's already
    # covered by `cruft lint`.
    enable_lint_rules: ClassVar[bool] = True

    @distinct_union_type_per_subclass(in_scope_types=[EnvironmentName])
    @dataclass(frozen=True)
    class Batch(AbstractLintRequest.Batch):
        # The snapshot of the files this batch should fix.
        snapshot: Snapshot

        @property
        def files(self) -> tuple[str, ...]:
            # De-duplicate the batch's elements while preserving order.
            return tuple(FrozenOrderedSet(self.elements))

    @classmethod
    def _get_rules(cls) -> Iterable[UnionRule]:
        # Register as a linter too (see `enable_lint_rules` above), then as a fixer.
        if cls.enable_lint_rules:
            yield from super()._get_rules()
        yield UnionRule(AbstractFixRequest, cls)
        yield UnionRule(AbstractFixRequest.Batch, cls.Batch)
158

159

160
class FixTargetsRequest(AbstractFixRequest, LintTargetsRequest):
    """A fix request whose batches are derived from targets."""

    @classmethod
    def _get_rules(cls) -> Iterable:
        # Partition per-file so that every tool touching a file can be chained
        # over that file's snapshot.
        yield from cls.partitioner_type.default_rules(cls, by_file=True)
        # NB: We don't want to yield `lint.py`'s default partitioner, so only pass
        # the `UnionRule`s from the superclass through.
        for inherited_rule in super()._get_rules():
            if isinstance(inherited_rule, UnionRule):
                yield inherited_rule
        yield UnionRule(FixTargetsRequest.PartitionRequest, cls.PartitionRequest)
12✔
171

172

173
class FixFilesRequest(AbstractFixRequest, LintFilesRequest):
    """A fix request whose batches are derived from raw file paths."""

    @classmethod
    def _get_rules(cls) -> Iterable:
        # File-based requests have no built-in partitioning scheme, so a custom
        # partitioner rule is mandatory.
        if cls.partitioner_type is not PartitionerType.CUSTOM:
            raise ValueError(
                "Pants does not provide default partitioners for `FixFilesRequest`."
                " You will need to provide your own partitioner rule."
            )

        yield from super()._get_rules()
        yield UnionRule(FixFilesRequest.PartitionRequest, cls.PartitionRequest)
8✔
184

185

186
class _FixBatchElement(NamedTuple):
    """One tool invocation within a sequential fix batch."""

    # The concrete `Batch` union member to instantiate for this tool.
    request_type: type[AbstractFixRequest.Batch]
    # Human-readable tool name, used in logs and the run summary.
    tool_name: str
    # The files this tool should fix.
    files: tuple[str, ...]
    # Partition metadata, passed through to the tool's batch.
    key: Any
12✔
191

192

193
class _FixBatchRequest(Collection[_FixBatchElement]):
    """Request to sequentially fix all the elements in the given batch.

    All elements cover the same files; each tool's output snapshot becomes the
    next tool's input (see `fix_batch_sequential`).
    """
195

196

197
@dataclass(frozen=True)
12✔
198
class _FixBatchResult:
12✔
199
    results: tuple[FixResult, ...]
12✔
200

201
    @property
12✔
202
    def did_change(self) -> bool:
12✔
203
        return any(result.did_change for result in self.results)
2✔
204

205

206
class FixSubsystem(GoalSubsystem):
    # Options for the `fix` goal (registered only when a fixer exists, see `activated`).
    name = "fix"
    # NB: the original help text linked "[The `fmt` goal]" to `reference/goals/fix` and was
    # missing the `)` that closes the markdown link; both are fixed here (compare the
    # well-formed `lint` link below).
    help = softwrap(
        f"""
        Autofix source code.

        This goal runs tools that make 'semantic' changes to source code, where the meaning of the
        code may change.

        See also:

        - [The `fmt` goal]({doc_url("reference/goals/fmt")}) will run code-editing tools that may make only
          syntactic changes, not semantic ones. The `fix` includes running these `fmt` tools by
          default (see [the `skip_formatters` option](#skip_formatters) to control this).

        - [The `lint` goal]({doc_url("reference/goals/lint")}) will validate code is formatted, by running these
          fixers and checking there's no change.

        - Documentation about formatters for various ecosystems, such as:
          [Python]({doc_url("docs/python/overview/linters-and-formatters")}), [JVM]({doc_url("jvm/java-and-scala#lint-and-format")}),
          [SQL]({doc_url("docs/sql#enable-sqlfluff-linter")})
        """
    )

    @classmethod
    def activated(cls, union_membership: UnionMembership) -> bool:
        # The goal only exists when at least one fixer backend is registered.
        return AbstractFixRequest in union_membership

    only = OnlyOption("fixer", "autoflake", "pyupgrade")
    skip_formatters = BoolOption(
        default=False,
        help=softwrap(
            f"""
            If true, skip running all formatters.

            FYI: when running `{bin_name()} fix fmt ::`, there should be diminishing performance
            benefit to using this flag. Pants attempts to reuse the results from `fmt` when running
            `fix` where possible.
            """
        ),
    )
    batch_size = BatchSizeOption(uppercase="Fixer", lowercase="fixer")
12✔
248

249

250
class Fix(Goal):
    """The `fix` goal."""

    subsystem_cls = FixSubsystem
    # Fixed files are written back into the local workspace (see `_write_files`),
    # so this goal only runs in the local environment.
    environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
12✔
253

254

255
async def _write_files(workspace: Workspace, batched_results: Iterable[_FixBatchResult]):
    """Materialize the fixers' edits into the workspace, if any tool changed anything."""
    if not any(batched_result.did_change for batched_result in batched_results):
        return
    # NB: this will fail if there are any conflicting changes, which we want to happen rather
    # than silently having one result override the other. In practice, this should never
    # happen due to us grouping each file's tools into a single digest.
    final_digests = (
        batched_result.results[-1].output.digest for batched_result in batched_results
    )
    merged_digest = await merge_digests(MergeDigests(final_digests))
    workspace.write_digest(merged_digest)
2✔
266

267

268
def _print_results(
12✔
269
    console: Console,
270
    results: Iterable[FixResult],
271
):
272
    if results:
2✔
273
        console.print_stderr("")
2✔
274

275
    # We group all results for the same tool so that we can give one final status in the
276
    # summary. This is only relevant if there were multiple results because of
277
    # `--per-file-caching`.
278
    tool_to_results = defaultdict(set)
2✔
279
    for result in results:
2✔
280
        tool_to_results[result.tool_name].add(result)
2✔
281

282
    for tool, results in sorted(tool_to_results.items()):
2✔
283
        if any(result.did_change for result in results):
2✔
284
            sigil = console.sigil_succeeded_with_edits()
2✔
285
            status = "made changes"
2✔
286
        else:
287
            sigil = console.sigil_succeeded()
1✔
288
            status = "made no changes"
1✔
289
        console.print_stderr(f"{sigil} {tool} {status}.")
2✔
290

291

292
# Type variables keeping `_do_fix` generic over the concrete request, partitioner,
# and goal types its caller supplies (e.g. the `fix` goal rule below).
_CoreRequestType = TypeVar("_CoreRequestType", bound=AbstractFixRequest)
_TargetPartitioner = TypeVar("_TargetPartitioner", bound=FixTargetsRequest.PartitionRequest)
_FilePartitioner = TypeVar("_FilePartitioner", bound=FixFilesRequest.PartitionRequest)
_GoalT = TypeVar("_GoalT", bound=Goal)
12✔
296

297

298
class _BatchableMultiToolGoalSubsystem(_MultiToolGoalSubsystem, Protocol):
    """Structural type: a multi-tool goal subsystem that also exposes a batch-size option."""

    # Used by `_do_fix` as the target batch size (and 4x as the maximum).
    batch_size: BatchSizeOption
12✔
300

301

302
@rule(polymorphic=True)
async def fix_batch(batch: AbstractFixRequest.Batch) -> FixResult:
    """Polymorphic rule stub; the engine dispatches to the union member's implementation."""
    raise NotImplementedError()
×
305

306

307
@rule
async def fix_batch_sequential(
    request: _FixBatchRequest,
) -> _FixBatchResult:
    """Run every tool in the batch in order, feeding each tool's output to the next.

    All elements in `request` cover the same files (see `_make_disjoint_batch_requests`),
    so the starting snapshot is built from the first element's files.
    """
    current_snapshot = await digest_to_snapshot(
        **implicitly({PathGlobs(request[0].files): PathGlobs})
    )

    results = []
    for request_type, tool_name, files, key in request:
        batch = request_type(tool_name, files, key, current_snapshot)
        result = await fix_batch(**implicitly({batch: AbstractFixRequest.Batch}))
        results.append(result)

        # A fixer must only rewrite the files it was given — never add or remove files.
        assert set(result.output.files) == set(batch.files), (
            f"Expected {result.output.files} to match {batch.files}"
        )
        # Chain: the next tool sees this tool's edits.
        current_snapshot = result.output
    return _FixBatchResult(tuple(results))
2✔
326

327

328
async def _do_fix(
    core_request_types: Iterable[type[_CoreRequestType]],
    target_partitioners: Iterable[type[_TargetPartitioner]],
    file_partitioners: Iterable[type[_FilePartitioner]],
    goal_cls: type[_GoalT],
    subsystem: _BatchableMultiToolGoalSubsystem,
    specs: Specs,
    workspace: Workspace,
    console: Console,
    make_targets_partition_request_get: Callable[
        [_TargetPartitioner], Coroutine[Any, Any, Partitions]
    ],
    make_files_partition_request_get: Callable[[_FilePartitioner], Coroutine[Any, Any, Partitions]],
) -> _GoalT:
    """Shared driver for the `fix` goal: partition, batch, run tools, write back results.

    Returns `goal_cls(exit_code=0)`; see the final comment for why failures are not expected.
    """
    partitions_by_request_type = await get_partitions_by_request_type(
        core_request_types,
        target_partitioners,
        file_partitioners,
        subsystem,
        specs,
        make_targets_partition_request_get,
        make_files_partition_request_get,
    )

    if not partitions_by_request_type:
        # Nothing matched the specs / `--only` filters: trivially succeed.
        return goal_cls(exit_code=0)

    def batch_by_size(files: Iterable[str]) -> Iterator[tuple[str, ...]]:
        # Split `files` into stable batches around `batch_size` entries each.
        batches = partition_sequentially(
            files,
            key=lambda x: str(x),
            size_target=subsystem.batch_size,  # type: ignore[arg-type]
            size_max=4 * subsystem.batch_size,  # type: ignore[operator]
        )
        for batch in batches:
            yield tuple(batch)

    def _make_disjoint_batch_requests() -> Iterable[_FixBatchRequest]:
        # Declarations for the loop variables used below.
        partition_infos: Iterable[tuple[type[AbstractFixRequest], Any]]
        files: Sequence[str]

        # Invert the partitioning: for each file, collect every
        # (request type, partition metadata) pair that wants to fix it.
        partition_infos_by_files = defaultdict(list)
        for request_type, partitions_list in partitions_by_request_type.items():
            for partitions in partitions_list:
                for partition in partitions:
                    for file in partition.elements:
                        partition_infos_by_files[file].append((request_type, partition.metadata))

        # Group files that share the exact same set of tools, so each file appears
        # in exactly one request and the resulting output digests stay disjoint.
        files_by_partition_info = defaultdict(list)
        for file, partition_infos in partition_infos_by_files.items():
            deduped_partition_infos = FrozenOrderedSet(partition_infos)
            files_by_partition_info[deduped_partition_infos].append(file)

        # One request per (tool set, size-limited file batch); each request's elements
        # all cover the same files so `fix_batch_sequential` can chain them.
        for partition_infos, files in files_by_partition_info.items():
            for batch in batch_by_size(files):
                yield _FixBatchRequest(
                    _FixBatchElement(
                        request_type.Batch,
                        request_type.tool_name,
                        batch,
                        partition_metadata,
                    )
                    for request_type, partition_metadata in partition_infos
                )

    # Disjoint batches run concurrently; tools within a batch run sequentially.
    all_results = await concurrently(
        fix_batch_sequential(request) for request in _make_disjoint_batch_requests()
    )

    individual_results = list(
        itertools.chain.from_iterable(result.results for result in all_results)
    )

    await _write_files(workspace, all_results)
    _print_results(console, individual_results)

    # Since the rules to produce FixResult should use ProcessResult, rather than
    # FallibleProcessResult, we assume that there were no failures.
    return goal_cls(exit_code=0)
2✔
407

408

409
@rule(polymorphic=True)
async def partition_targets(req: FixTargetsRequest.PartitionRequest) -> Partitions:
    """Polymorphic rule stub; the engine dispatches to the registered target partitioner."""
    raise NotImplementedError()
×
412

413

414
@rule(polymorphic=True)
async def partition_files(req: FixFilesRequest.PartitionRequest) -> Partitions:
    """Polymorphic rule stub; the engine dispatches to the registered file partitioner."""
    raise NotImplementedError()
×
417

418

419
@goal_rule
async def fix(
    console: Console,
    specs: Specs,
    fix_subsystem: FixSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fix:
    """The `fix` goal: run all registered fixers (and, unless skipped, formatters)."""
    return await _do_fix(
        sorted(
            (
                request_type
                for request_type in union_membership.get(AbstractFixRequest)
                # `--skip-formatters` drops the formatter-only requests.
                if not (request_type.is_formatter and fix_subsystem.skip_formatters)
            ),
            # NB: We sort the core request types so that fixers are first. This is to ensure that, between
            # fixers and formatters, re-running isn't necessary due to tool conflicts (re-running may
            # still be necessary within formatters). This is because fixers are expected to modify
            # code irrespective of formatting, and formatters aren't expected to be modifying the code
            # in a way that needs to be fixed.
            key=lambda request_type: request_type.is_fixer,
            reverse=True,
        ),
        union_membership.get(FixTargetsRequest.PartitionRequest),
        union_membership.get(FixFilesRequest.PartitionRequest),
        Fix,
        fix_subsystem,  # type: ignore[arg-type]
        specs,
        workspace,
        console,
        lambda request_type: partition_targets(
            **implicitly({request_type: FixTargetsRequest.PartitionRequest})
        ),
        lambda request_type: partition_files(
            **implicitly({request_type: FixFilesRequest.PartitionRequest})
        ),
    )
456

457

458
@rule(level=LogLevel.DEBUG)
async def convert_fix_result_to_lint_result(fix_result: FixResult) -> LintResult:
    """Adapt a `FixResult` into a `LintResult`: any change means the lint check failed."""
    exit_code = 1 if fix_result.did_change else 0
    return LintResult(
        exit_code,
        fix_result.stdout,
        fix_result.stderr,
        linter_name=fix_result.tool_name,
        _render_message=False,  # Don't re-render the message
    )
467

468

469
def rules():
    """Entry point for registering this module's rules with the engine."""
    return collect_rules()
7✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc