• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bramp / build-along / 20010582393

07 Dec 2025 09:22PM UTC coverage: 90.299% (-0.02%) from 90.316%
20010582393

push

github

bramp
refactor(classifiers): use BBox helpers filter_contained and filter_overlapping

5 of 8 new or added lines in 2 files covered. (62.5%)

144 existing lines in 10 files now uncovered.

10779 of 11937 relevant lines covered (90.3%)

0.9 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

99.09
/src/build_a_long/pdf_extract/classifier/classifier.py
1
"""
2
Rule-based classifier for labeling page elements.
3

4
Pipeline order and dependencies
5
--------------------------------
6
The classification pipeline operates in two main phases:
7

8
1. **Bottom-up Scoring**: All classifiers run independently to identify potential
9
   candidates (e.g. page numbers, part counts, step numbers) and score them based
10
   on heuristics. No construction of final elements happens here.
11

12
2. **Top-down Construction**: The root `PageClassifier` is invoked to construct
13
   the final `Page` object. It recursively requests the construction of its
14
   dependencies (e.g. "Give me the best PageNumber"), which in turn construct
15
   their own dependencies. This ensures a consistent and validated object tree.
16

17
"""
18

19
from __future__ import annotations
1✔
20

21
import logging
1✔
22

23
from build_a_long.pdf_extract.classifier.bags import (
1✔
24
    BagNumberClassifier,
25
    NewBagClassifier,
26
)
27
from build_a_long.pdf_extract.classifier.batch_classification_result import (
1✔
28
    BatchClassificationResult,
29
)
30
from build_a_long.pdf_extract.classifier.block_filter import (
1✔
31
    filter_background_blocks,
32
    filter_duplicate_blocks,
33
    filter_overlapping_text_blocks,
34
)
35
from build_a_long.pdf_extract.classifier.classification_result import (
1✔
36
    ClassificationResult,
37
)
38
from build_a_long.pdf_extract.classifier.classifier_config import ClassifierConfig
1✔
39
from build_a_long.pdf_extract.classifier.pages import (
1✔
40
    PageHintCollection,
41
)
42
from build_a_long.pdf_extract.classifier.pages.divider_classifier import (
1✔
43
    DividerClassifier,
44
)
45
from build_a_long.pdf_extract.classifier.pages.page_classifier import PageClassifier
1✔
46
from build_a_long.pdf_extract.classifier.pages.page_number_classifier import (
1✔
47
    PageNumberClassifier,
48
)
49
from build_a_long.pdf_extract.classifier.pages.progress_bar_classifier import (
1✔
50
    ProgressBarClassifier,
51
)
52
from build_a_long.pdf_extract.classifier.parts import (
1✔
53
    PartCountClassifier,
54
    PartNumberClassifier,
55
    PartsClassifier,
56
    PartsImageClassifier,
57
    PartsListClassifier,
58
    PieceLengthClassifier,
59
    ShineClassifier,
60
)
61
from build_a_long.pdf_extract.classifier.removal_reason import RemovalReason
1✔
62
from build_a_long.pdf_extract.classifier.steps import (
1✔
63
    ArrowClassifier,
64
    DiagramClassifier,
65
    RotationSymbolClassifier,
66
    StepClassifier,
67
    StepCountClassifier,
68
    StepNumberClassifier,
69
    SubAssemblyClassifier,
70
)
71
from build_a_long.pdf_extract.classifier.text import FontSizeHints, TextHistogram
1✔
72
from build_a_long.pdf_extract.classifier.topological_sort import topological_sort
1✔
73
from build_a_long.pdf_extract.extractor import PageData
1✔
74
from build_a_long.pdf_extract.extractor.bbox import filter_contained
1✔
75
from build_a_long.pdf_extract.extractor.lego_page_elements import (
1✔
76
    PageNumber,
77
    PartCount,
78
    PartsList,
79
    StepNumber,
80
)
81
from build_a_long.pdf_extract.extractor.page_blocks import Blocks
1✔
82

83
# Module-level logger, named after this module per the standard logging convention.
logger = logging.getLogger(__name__)

# Pages with more blocks than this threshold will be skipped during classification.
# This avoids O(n²) algorithms (like duplicate detection) that become prohibitively
# slow on pages with thousands of vector drawings. Such pages are typically info
# pages where each character is a separate vector graphic.
# TODO: Add spatial indexing to handle high-block pages efficiently.
MAX_BLOCKS_PER_PAGE: int = 1000
91

92

93
# TODO require config, so we don't accidentally use default empty config
def classify_elements(
    page: PageData, config: ClassifierConfig | None = None
) -> ClassificationResult:
    """Classify and label elements on a single page using rule-based heuristics.

    Args:
        page: A single PageData object to classify.
        config: Optional classifier configuration with font/page hints.
            If None, uses default empty configuration (no hints).
            For better classification accuracy, pass a config with
            FontSizeHints computed from multiple pages of the same PDF.

    Returns:
        A ClassificationResult object containing the classification results.
    """
    # Fall back to an empty configuration when the caller supplied none.
    effective_config = ClassifierConfig() if config is None else config
    return Classifier(effective_config).classify(page)
114

115

116
def _filter_page_blocks(page_data: PageData):
    """Run all block filters on a single page.

    Applies, in order: background-block filtering, overlapping-text filtering,
    and duplicate-bbox filtering. Each filter only sees the blocks kept by the
    previous one, so the three removal mappings have disjoint keys.

    Returns:
        A tuple ``(kept_blocks, text_removed, bbox_removed, background_removed)``
        where each ``*_removed`` maps a removed block to its RemovalReason.
    """
    kept_blocks = page_data.blocks

    # Filter background blocks (full page blocks like background images)
    kept_blocks, background_removed = filter_background_blocks(
        kept_blocks, page_data.bbox.width, page_data.bbox.height
    )

    # Filter overlapping text blocks (e.g., "4" and "43" at same origin)
    kept_blocks, text_removed = filter_overlapping_text_blocks(kept_blocks)

    # Filter duplicate image/drawing blocks based on IOU
    kept_blocks, bbox_removed = filter_duplicate_blocks(kept_blocks)

    return kept_blocks, text_removed, bbox_removed, background_removed


def classify_pages(
    pages: list[PageData], pages_for_hints: list[PageData] | None = None
) -> BatchClassificationResult:
    """Classify and label elements across multiple pages using rule-based heuristics.

    This function performs a three-phase process:
    1. Filtering phase: Mark duplicate/similar blocks as removed on each page
    2. Analysis phase: Build font size hints from text properties (excluding
       removed blocks)
    3. Classification phase: Use hints to guide element classification

    Args:
        pages: A list of PageData objects to classify.
        pages_for_hints: Optional list of pages to use for generating font/page hints.
            If None, uses `pages`. This allows generating hints from all pages
            while only classifying a subset (e.g., when using --pages filter).

    Returns:
        BatchClassificationResult containing per-page results and global histogram
    """
    # Use all pages for hint generation if provided, otherwise use selected pages
    hint_pages = pages_for_hints if pages_for_hints is not None else pages

    # Phase 1: Filter duplicate blocks on each page and track removals.
    # Skip pages with too many blocks to avoid O(n²) performance issues.
    removed_blocks_per_page: list[dict[Blocks, RemovalReason]] = []
    skipped_pages: set[int] = set()  # Track page numbers that are skipped

    for page_data in pages:
        # Skip pages with too many blocks - these are likely info/inventory pages
        # with vectorized text that cause O(n²) algorithms to be very slow
        if len(page_data.blocks) > MAX_BLOCKS_PER_PAGE:
            logger.debug(
                f"Page {page_data.page_number}: skipping classification "
                f"({len(page_data.blocks)} blocks exceeds threshold of "
                f"{MAX_BLOCKS_PER_PAGE})"
            )
            skipped_pages.add(page_data.page_number)
            removed_blocks_per_page.append({})
            continue

        _, text_removed, bbox_removed, background_removed = _filter_page_blocks(
            page_data
        )

        # Combine all removal mappings into a single dict for this page.
        # The filters operate on disjoint block sets, so merge order is safe.
        combined_removed_mapping = {
            **text_removed,
            **bbox_removed,
            **background_removed,
        }

        logger.debug(
            f"Page {page_data.page_number}: "
            f"filtered {len(text_removed)} overlapping text, "
            f"{len(bbox_removed)} duplicate bbox blocks, "
            f"{len(background_removed)} background blocks"
        )

        removed_blocks_per_page.append(combined_removed_mapping)

    # Phase 2: Extract font size hints from hint pages (excluding removed blocks)
    # Build pages with non-removed blocks for hint extraction and histogram

    # Filter duplicates from hint pages (may be different from pages to classify)
    # TODO We are re-filtering pages that also appear in `pages`; optimize by
    # changing the API to accept one list of PageData, and separate by page_numbers.
    hint_pages_without_duplicates = []
    for page_data in hint_pages:
        # Skip high-block pages for hints too (same threshold)
        if len(page_data.blocks) > MAX_BLOCKS_PER_PAGE:
            continue

        kept_blocks = _filter_page_blocks(page_data)[0]
        hint_pages_without_duplicates.append(
            PageData(
                page_number=page_data.page_number,
                bbox=page_data.bbox,
                blocks=kept_blocks,
            )
        )

    # Build pages without duplicates for classification
    pages_without_duplicates = []
    for page_data, removed_mapping in zip(pages, removed_blocks_per_page, strict=True):
        # We need to filter blocks that were removed by ANY filter
        non_removed_blocks = [
            block for block in page_data.blocks if block not in removed_mapping
        ]
        pages_without_duplicates.append(
            PageData(
                page_number=page_data.page_number,
                bbox=page_data.bbox,
                blocks=non_removed_blocks,
            )
        )

    # Generate hints from hint pages, histogram from pages to classify
    font_size_hints = FontSizeHints.from_pages(hint_pages_without_duplicates)
    page_hints = PageHintCollection.from_pages(hint_pages_without_duplicates)
    histogram = TextHistogram.from_pages(pages_without_duplicates)

    # Phase 3: Classify using the hints (on pages without duplicates)
    config = ClassifierConfig(font_size_hints=font_size_hints, page_hints=page_hints)
    classifier = Classifier(config)

    results = []
    for page_data, page_without_duplicates, removed_mapping in zip(
        pages, pages_without_duplicates, removed_blocks_per_page, strict=True
    ):
        # Handle skipped pages
        if page_data.page_number in skipped_pages:
            result = ClassificationResult(
                page_data=page_data,
                skipped_reason=(
                    f"Page has {len(page_data.blocks)} blocks, which exceeds "
                    f"the threshold of {MAX_BLOCKS_PER_PAGE}. This is likely an "
                    f"info/inventory page with vectorized text."
                ),
            )
            results.append(result)
            continue

        # Classify using only non-removed blocks
        result = classifier.classify(page_without_duplicates)

        # Update result to use original page_data (with all blocks)
        result.page_data = page_data

        # Mark removed blocks
        for removed_block, removal_reason in removed_mapping.items():
            result.mark_removed(removed_block, removal_reason)

        results.append(result)

    return BatchClassificationResult(results=results, histogram=histogram)
270

271

272
type Classifiers = (
1✔
273
    PageNumberClassifier
274
    | ProgressBarClassifier
275
    | DividerClassifier
276
    | BagNumberClassifier
277
    | PartCountClassifier
278
    | PartNumberClassifier
279
    | StepNumberClassifier
280
    | StepCountClassifier
281
    | PieceLengthClassifier
282
    | PartsClassifier
283
    | PartsListClassifier
284
    | PartsImageClassifier
285
    | ShineClassifier
286
    | NewBagClassifier
287
    | DiagramClassifier
288
    | ArrowClassifier
289
    | SubAssemblyClassifier
290
    | StepClassifier
291
    | PageClassifier
292
)
293

294

295
class Classifier:
    """
    Performs a single run of classification based on rules, configuration, and hints.
    This class should be stateless.
    """

    def __init__(self, config: ClassifierConfig):
        self.config = config
        # Instantiate every pipeline classifier, then order them topologically
        # so each one runs after the classifiers it depends on. The list order
        # below is preserved as the sort input (it can matter for ties).
        classifier_types = [
            PageNumberClassifier,
            ProgressBarClassifier,
            DividerClassifier,
            BagNumberClassifier,
            PartCountClassifier,
            PartNumberClassifier,
            StepNumberClassifier,
            StepCountClassifier,
            PieceLengthClassifier,
            PartsClassifier,
            PartsListClassifier,
            DiagramClassifier,
            RotationSymbolClassifier,
            ArrowClassifier,
            PartsImageClassifier,
            ShineClassifier,
            NewBagClassifier,
            SubAssemblyClassifier,
            StepClassifier,
            PageClassifier,
        ]
        self.classifiers = topological_sort(
            [cls(config=config) for cls in classifier_types]
        )

    def classify(self, page_data: PageData) -> ClassificationResult:
        """
        Runs the classification logic and returns a result.
        It does NOT modify page_data directly.

        The classification process runs in two phases:
        1. Score all classifiers (bottom-up) - auto-registers classifiers
        2. Construct final elements (top-down starting from Page)
        """
        result = ClassificationResult(page_data=page_data)

        logger.debug(f"Starting classification for page {page_data.page_number}")

        # 1. Bottom-up scoring. Note: score() automatically registers each
        # classifier for its output labels.
        for clf in self.classifiers:
            clf.score(result)

        # 2. Top-down construction, driven by the root PageClassifier.
        root = next(c for c in self.classifiers if isinstance(c, PageClassifier))
        root.build_all(result)

        # TODO Do we actually ever add warnings?
        for warning in self._log_post_classification_warnings(page_data, result):
            result.add_warning(warning)

        return result

    def _log_post_classification_warnings(
        self, page_data: PageData, result: ClassificationResult
    ) -> list[str]:
        """Collect sanity-check warnings about the classified page."""
        warnings: list[str] = []

        # A page is expected to carry a page number.
        page_numbers = result.get_winners_by_score("page_number", PageNumber)
        if not page_numbers:
            warnings.append(f"Page {page_data.page_number}: missing page number")

        # Gather the winning elements needed for the structural checks below.
        parts_lists = result.get_winners_by_score("parts_list", PartsList)
        part_counts = result.get_winners_by_score("part_count", PartCount)

        # Every parts list should contain at least one part count.
        for parts_list in parts_lists:
            if not filter_contained(part_counts, parts_list.bbox):
                warnings.append(
                    f"Page {page_data.page_number}: parts list at {parts_list.bbox} "
                    f"contains no part counts"
                )

        # Every step number should have some parts list above it (with a small
        # tolerance for near-alignment).
        ABOVE_EPS = 2.0
        steps = result.get_winners_by_score("step_number", StepNumber)
        for step in steps:
            sb = step.bbox
            has_list_above = any(
                pl.bbox.y1 <= sb.y0 + ABOVE_EPS for pl in parts_lists
            )
            if not has_list_above:
                warnings.append(
                    f"Page {page_data.page_number}: step number '{step.value}' "
                    f"at {sb} has no parts list above it"
                )
        return warnings
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc