deepset-ai / haystack / build 13116238781
03 Feb 2025 02:54PM UTC, coverage: 91.383% (+0.02%) from 91.359%

Pull Request #8794: refactor: HF API Embedders - use `InferenceClient.feature_extraction` instead of `InferenceClient.post`
Merge a27287d42 into 503d275ad

8898 of 9737 relevant lines covered (91.38%), 0.91 hits per line

Source file: haystack/components/preprocessors/document_splitter.py (99.5% of lines covered)
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0

from copy import deepcopy
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple

from more_itertools import windowed

from haystack import Document, component, logging
from haystack.components.preprocessors.sentence_tokenizer import Language, SentenceSplitter, nltk_imports
from haystack.core.serialization import default_from_dict, default_to_dict
from haystack.utils import deserialize_callable, serialize_callable

logger = logging.getLogger(__name__)

# Mapping from `split_by` value to its delimiter character.
# The "function" and "sentence" modes don't split by a single character, so they are not listed here.
_CHARACTER_SPLIT_BY_MAPPING = {"page": "\f", "passage": "\n\n", "period": ".", "word": " ", "line": "\n"}


@component
class DocumentSplitter:
    """
    Splits long documents into smaller chunks.

    This is a common preprocessing step during indexing. It helps Embedders create meaningful semantic representations
    and prevents exceeding language model context limits.

    The DocumentSplitter is compatible with the following DocumentStores:
    - [Astra](https://docs.haystack.deepset.ai/docs/astradocumentstore)
    - [Chroma](https://docs.haystack.deepset.ai/docs/chromadocumentstore) (limited support; overlapping information is
      not stored)
    - [Elasticsearch](https://docs.haystack.deepset.ai/docs/elasticsearch-document-store)
    - [OpenSearch](https://docs.haystack.deepset.ai/docs/opensearch-document-store)
    - [Pgvector](https://docs.haystack.deepset.ai/docs/pgvectordocumentstore)
    - [Pinecone](https://docs.haystack.deepset.ai/docs/pinecone-document-store) (limited support; overlapping
      information is not stored)
    - [Qdrant](https://docs.haystack.deepset.ai/docs/qdrant-document-store)
    - [Weaviate](https://docs.haystack.deepset.ai/docs/weaviatedocumentstore)

    ### Usage example

    ```python
    from haystack import Document
    from haystack.components.preprocessors import DocumentSplitter

    doc = Document(content="Moonlight shimmered softly, wolves howled nearby, night enveloped everything.")

    splitter = DocumentSplitter(split_by="word", split_length=3, split_overlap=0)
    result = splitter.run(documents=[doc])
    ```
    """

    def __init__(  # pylint: disable=too-many-positional-arguments
        self,
        split_by: Literal["function", "page", "passage", "period", "word", "line", "sentence"] = "word",
        split_length: int = 200,
        split_overlap: int = 0,
        split_threshold: int = 0,
        splitting_function: Optional[Callable[[str], List[str]]] = None,
        respect_sentence_boundary: bool = False,
        language: Language = "en",
        use_split_rules: bool = True,
        extend_abbreviations: bool = True,
    ):
        """
        Initialize DocumentSplitter.

        :param split_by: The unit for splitting your documents. Choose from:
            - `word` for splitting by spaces (" ")
            - `period` for splitting by periods (".")
            - `page` for splitting by form feed ("\\f")
            - `passage` for splitting by double line breaks ("\\n\\n")
            - `line` for splitting each line ("\\n")
            - `sentence` for splitting by the NLTK sentence tokenizer
            - `function` for splitting with a custom function passed via `splitting_function`

        :param split_length: The maximum number of units in each split.
        :param split_overlap: The number of overlapping units for each split.
        :param split_threshold: The minimum number of units per split. If a split has fewer units
            than the threshold, it's attached to the previous split.
        :param splitting_function: Necessary when `split_by` is set to "function".
            This is a function which must accept a single `str` as input and return a `list` of `str` as output,
            representing the chunks after splitting.
        :param respect_sentence_boundary: Choose whether to respect sentence boundaries when splitting by "word".
            If True, uses NLTK to detect sentence boundaries, ensuring splits occur only between sentences.
        :param language: Choose the language for the NLTK tokenizer. The default is English ("en").
        :param use_split_rules: Choose whether to use additional split rules when splitting by `sentence`.
        :param extend_abbreviations: Choose whether to extend NLTK's PunktTokenizer abbreviations with a list
            of curated abbreviations, if available. This is currently supported for English ("en") and German ("de").
        """

        self.split_by = split_by
        self.split_length = split_length
        self.split_overlap = split_overlap
        self.split_threshold = split_threshold
        self.splitting_function = splitting_function
        self.respect_sentence_boundary = respect_sentence_boundary
        self.language = language
        self.use_split_rules = use_split_rules
        self.extend_abbreviations = extend_abbreviations

        self._init_checks(
            split_by=split_by,
            split_length=split_length,
            split_overlap=split_overlap,
            splitting_function=splitting_function,
            respect_sentence_boundary=respect_sentence_boundary,
        )
        self._use_sentence_splitter = split_by == "sentence" or (respect_sentence_boundary and split_by == "word")
        if self._use_sentence_splitter:
            nltk_imports.check()
            self.sentence_splitter = None
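
    # Worked example of `split_threshold` (hypothetical values, for illustration): with
    # split_by="word", split_length=5, split_overlap=0 and split_threshold=3, a 12-word
    # document yields word windows of sizes 5, 5 and 2. The trailing 2-word window falls
    # below the threshold, so _concatenate_units merges it into the previous split,
    # producing two chunks of 5 and 7 words instead of three chunks of 5, 5 and 2.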

    def _init_checks(
        self,
        *,
        split_by: str,
        split_length: int,
        split_overlap: int,
        splitting_function: Optional[Callable],
        respect_sentence_boundary: bool,
    ) -> None:
        """
        Validates initialization parameters for DocumentSplitter.

        :param split_by: The unit for splitting documents
        :param split_length: The maximum number of units in each split
        :param split_overlap: The number of overlapping units for each split
        :param splitting_function: Custom function for splitting when split_by="function"
        :param respect_sentence_boundary: Whether to respect sentence boundaries when splitting
        :raises ValueError: If any parameter is invalid
        """
        valid_split_by = ["function", "page", "passage", "period", "word", "line", "sentence"]
        if split_by not in valid_split_by:
            raise ValueError(f"split_by must be one of {', '.join(valid_split_by)}.")

        if split_by == "function" and splitting_function is None:
            raise ValueError("When 'split_by' is set to 'function', a valid 'splitting_function' must be provided.")

        if split_length <= 0:
            raise ValueError("split_length must be greater than 0.")

        if split_overlap < 0:
            raise ValueError("split_overlap must be greater than or equal to 0.")

        if respect_sentence_boundary and split_by != "word":
            logger.warning(
                "The 'respect_sentence_boundary' option is only supported for `split_by='word'`. "
                "The option `respect_sentence_boundary` will be set to `False`."
            )
            self.respect_sentence_boundary = False

    def warm_up(self):
        """
        Warm up the DocumentSplitter by loading the sentence tokenizer.
        """
        if self._use_sentence_splitter and self.sentence_splitter is None:
            self.sentence_splitter = SentenceSplitter(
                language=self.language,
                use_split_rules=self.use_split_rules,
                extend_abbreviations=self.extend_abbreviations,
                keep_white_spaces=True,
            )

    @component.output_types(documents=List[Document])
    def run(self, documents: List[Document]):
        """
        Split documents into smaller parts.

        Splits documents by the unit expressed in `split_by`, with a length of `split_length`
        and an overlap of `split_overlap`.

        :param documents: The documents to split.
        :returns: A dictionary with the following key:
            - `documents`: List of documents with the split texts. Each document includes:
                - A metadata field `source_id` to track the original document.
                - A metadata field `page_number` to track the original page number.
                - All other metadata copied from the original document.

        :raises TypeError: if the input is not a list of Documents.
        :raises ValueError: if the content of a document is None.
        """
        if self._use_sentence_splitter and self.sentence_splitter is None:
            raise RuntimeError(
                "The component DocumentSplitter wasn't warmed up. Run 'warm_up()' before calling 'run()'."
            )

        if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):
            raise TypeError("DocumentSplitter expects a List of Documents as input.")

        split_docs: List[Document] = []
        for doc in documents:
            if doc.content is None:
                raise ValueError(
                    f"DocumentSplitter only works with text documents but content for document ID {doc.id} is None."
                )
            if doc.content == "":
                logger.warning("Document ID {doc_id} has an empty content. Skipping this document.", doc_id=doc.id)
                continue

            split_docs += self._split_document(doc)
        return {"documents": split_docs}
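
    # Sketch of the output shape (hypothetical input, mirroring the class docstring example):
    #
    #     splitter = DocumentSplitter(split_by="word", split_length=3, split_overlap=0)
    #     doc = Document(content="Moonlight shimmered softly, wolves howled nearby, night enveloped everything.")
    #     result = splitter.run(documents=[doc])
    #
    # result["documents"] then holds three Documents with the contents
    # "Moonlight shimmered softly, ", "wolves howled nearby, " and "night enveloped everything.",
    # each carrying meta["source_id"], meta["page_number"] == 1, meta["split_id"] in (0, 1, 2)
    # and meta["split_idx_start"] marking where the chunk begins in the source text.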

    def _split_document(self, doc: Document) -> List[Document]:
        if self.split_by == "sentence" or self.respect_sentence_boundary:
            return self._split_by_nltk_sentence(doc)

        if self.split_by == "function" and self.splitting_function is not None:
            return self._split_by_function(doc)

        return self._split_by_character(doc)

    def _split_by_nltk_sentence(self, doc: Document) -> List[Document]:
        split_docs = []

        result = self.sentence_splitter.split_sentences(doc.content)  # type: ignore # None check is done in run()
        units = [sentence["sentence"] for sentence in result]

        if self.respect_sentence_boundary:
            text_splits, splits_pages, splits_start_idxs = self._concatenate_sentences_based_on_word_amount(
                sentences=units, split_length=self.split_length, split_overlap=self.split_overlap
            )
        else:
            text_splits, splits_pages, splits_start_idxs = self._concatenate_units(
                elements=units,
                split_length=self.split_length,
                split_overlap=self.split_overlap,
                split_threshold=self.split_threshold,
            )
        metadata = deepcopy(doc.meta)
        metadata["source_id"] = doc.id
        split_docs += self._create_docs_from_splits(
            text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata
        )

        return split_docs

    def _split_by_character(self, doc) -> List[Document]:
        split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]
        units = doc.content.split(split_at)
        # Add the delimiter back to all units except the last one
        for i in range(len(units) - 1):
            units[i] += split_at
        text_splits, splits_pages, splits_start_idxs = self._concatenate_units(
            units, self.split_length, self.split_overlap, self.split_threshold
        )
        metadata = deepcopy(doc.meta)
        metadata["source_id"] = doc.id
        return self._create_docs_from_splits(
            text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata
        )
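
    # Worked example (hypothetical input): with split_by="period", the text "One. Two. Three."
    # is first split into ["One", " Two", " Three", ""]; re-attaching the delimiter to every
    # unit except the last gives ["One.", " Two.", " Three.", ""]. The empty tail unit adds
    # nothing when units are joined, and a window that joins to an empty string is skipped
    # inside _concatenate_units, so offsets stay consistent with the original text.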

    def _split_by_function(self, doc) -> List[Document]:
        # splitting_function is guaranteed to be set here: _init_checks validates it at init
        # time and _split_document checks it before dispatching to this method
        splits = self.splitting_function(doc.content)  # type: ignore
        docs: List[Document] = []
        for s in splits:
            meta = deepcopy(doc.meta)
            meta["source_id"] = doc.id
            docs.append(Document(content=s, meta=meta))
        return docs
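
    # Minimal sketch of split_by="function" (split_on_semicolons is a hypothetical callable):
    #
    #     def split_on_semicolons(text: str) -> List[str]:
    #         return text.split(";")
    #
    #     splitter = DocumentSplitter(split_by="function", splitting_function=split_on_semicolons)
    #
    # Note that function-based splits only receive the `source_id` meta field; unlike the
    # other modes they carry no `page_number`, `split_id` or `split_idx_start`.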

    def _concatenate_units(
        self, elements: List[str], split_length: int, split_overlap: int, split_threshold: int
    ) -> Tuple[List[str], List[int], List[int]]:
        """
        Concatenates the elements into parts of split_length units.

        Keeps track of the original page number that each element belongs to. If the length of the current units is
        less than the pre-defined `split_threshold`, it does not create a new split. Instead, it concatenates the
        current units with the last split, preventing the creation of excessively small splits.
        """

        text_splits: List[str] = []
        splits_pages: List[int] = []
        splits_start_idxs: List[int] = []
        cur_start_idx = 0
        cur_page = 1
        segments = windowed(elements, n=split_length, step=split_length - split_overlap)

        for seg in segments:
            current_units = [unit for unit in seg if unit is not None]
            txt = "".join(current_units)

            # check if length of current units is below split_threshold
            if len(current_units) < split_threshold and len(text_splits) > 0:
                # concatenate the last split with the current one
                text_splits[-1] += txt

            # NOTE: This line skips documents that have content=""
            elif len(txt) > 0:
                text_splits.append(txt)
                splits_pages.append(cur_page)
                splits_start_idxs.append(cur_start_idx)

            processed_units = current_units[: split_length - split_overlap]
            cur_start_idx += len("".join(processed_units))

            if self.split_by == "page":
                num_page_breaks = len(processed_units)
            else:
                num_page_breaks = sum(processed_unit.count("\f") for processed_unit in processed_units)

            cur_page += num_page_breaks

        return text_splits, splits_pages, splits_start_idxs
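
    # Worked example of the windowing (hypothetical values): for
    # elements=["a ", "b ", "c ", "d ", "e"] with split_length=3 and split_overlap=1, the
    # step is 3 - 1 = 2, so windowed() yields ("a ", "b ", "c ") and ("c ", "d ", "e"),
    # i.e. the splits "a b c " and "c d e" sharing the unit "c ". The start index advances
    # by the characters of the first `step` units of each window ("a b " is 4 characters),
    # so splits_start_idxs == [0, 4].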

    def _create_docs_from_splits(
        self, text_splits: List[str], splits_pages: List[int], splits_start_idxs: List[int], meta: Dict[str, Any]
    ) -> List[Document]:
        """
        Creates Document objects from text splits, enriching them with the page number and the metadata of the
        original document.
        """
        documents: List[Document] = []

        for i, (txt, split_idx) in enumerate(zip(text_splits, splits_start_idxs)):
            copied_meta = deepcopy(meta)
            copied_meta["page_number"] = splits_pages[i]
            copied_meta["split_id"] = i
            copied_meta["split_idx_start"] = split_idx
            doc = Document(content=txt, meta=copied_meta)
            documents.append(doc)

            if self.split_overlap <= 0:
                continue

            doc.meta["_split_overlap"] = []

            if i == 0:
                continue

            doc_start_idx = splits_start_idxs[i]
            previous_doc = documents[i - 1]
            previous_doc_start_idx = splits_start_idxs[i - 1]
            self._add_split_overlap_information(doc, doc_start_idx, previous_doc, previous_doc_start_idx)

        return documents

    @staticmethod
    def _add_split_overlap_information(
        current_doc: Document, current_doc_start_idx: int, previous_doc: Document, previous_doc_start_idx: int
    ):
        """
        Adds split overlap information to the current and previous Document's meta.

        :param current_doc: The Document that is being split.
        :param current_doc_start_idx: The starting index of the current Document.
        :param previous_doc: The Document that was split before the current Document.
        :param previous_doc_start_idx: The starting index of the previous Document.
        """
        overlapping_range = (current_doc_start_idx - previous_doc_start_idx, len(previous_doc.content))  # type: ignore

        if overlapping_range[0] < overlapping_range[1]:
            overlapping_str = previous_doc.content[overlapping_range[0] : overlapping_range[1]]  # type: ignore

            if current_doc.content.startswith(overlapping_str):  # type: ignore
                # add split overlap information to this Document regarding the previous Document
                current_doc.meta["_split_overlap"].append({"doc_id": previous_doc.id, "range": overlapping_range})

                # add split overlap information to previous Document regarding this Document
                overlapping_range = (0, overlapping_range[1] - overlapping_range[0])
                previous_doc.meta["_split_overlap"].append({"doc_id": current_doc.id, "range": overlapping_range})
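
    # Worked example (hypothetical values): if the previous Document is "a b c " starting
    # at index 0 and the current Document is "c d e" starting at index 4, then
    # overlapping_range == (4, 6) and previous_doc.content[4:6] == "c ", which the current
    # content starts with. The current Document records
    # {"doc_id": <previous id>, "range": (4, 6)} and the previous Document records
    # {"doc_id": <current id>, "range": (0, 2)}.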

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.
        """
        serialized = default_to_dict(
            self,
            split_by=self.split_by,
            split_length=self.split_length,
            split_overlap=self.split_overlap,
            split_threshold=self.split_threshold,
            respect_sentence_boundary=self.respect_sentence_boundary,
            language=self.language,
            use_split_rules=self.use_split_rules,
            extend_abbreviations=self.extend_abbreviations,
        )
        if self.splitting_function:
            serialized["init_parameters"]["splitting_function"] = serialize_callable(self.splitting_function)
        return serialized

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "DocumentSplitter":
        """
        Deserializes the component from a dictionary.
        """
        init_params = data.get("init_parameters", {})

        splitting_function = init_params.get("splitting_function", None)
        if splitting_function:
            init_params["splitting_function"] = deserialize_callable(splitting_function)

        return default_from_dict(cls, data)
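
    # Serialization round-trip sketch (hypothetical usage; the dictionary layout with a
    # "type" key plus "init_parameters" follows Haystack's default_to_dict):
    #
    #     splitter = DocumentSplitter(split_by="period", split_length=2)
    #     data = splitter.to_dict()
    #     restored = DocumentSplitter.from_dict(data)
    #
    # A custom splitting_function is stored via serialize_callable, so it should be an
    # importable, named callable; a lambda would not resolve again on deserialization.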

    @staticmethod
    def _concatenate_sentences_based_on_word_amount(
        sentences: List[str], split_length: int, split_overlap: int
    ) -> Tuple[List[str], List[int], List[int]]:
        """
        Groups the sentences into chunks of `split_length` words while respecting sentence boundaries.

        This function is only used when splitting by `word` and `respect_sentence_boundary` is set to `True`, i.e.,
        with the NLTK sentence tokenizer.

        :param sentences: The list of sentences to split.
        :param split_length: The maximum number of words in each split.
        :param split_overlap: The number of overlapping words in each split.
        :returns: A tuple containing the concatenated sentences, the start page numbers, and the start indices.
        """
        # chunk information
        chunk_word_count = 0
        chunk_starting_page_number = 1
        chunk_start_idx = 0
        current_chunk: List[str] = []
        # output lists
        split_start_page_numbers = []
        list_of_splits: List[List[str]] = []
        split_start_indices = []

        for sentence_idx, sentence in enumerate(sentences):
            current_chunk.append(sentence)
            chunk_word_count += len(sentence.split())
            next_sentence_word_count = (
                len(sentences[sentence_idx + 1].split()) if sentence_idx < len(sentences) - 1 else 0
            )

            # Number of words in the current chunk plus the next sentence is larger than the split_length,
            # or we reached the last sentence
            if (chunk_word_count + next_sentence_word_count) > split_length or sentence_idx == len(sentences) - 1:
                # Save the current chunk and start a new one
                list_of_splits.append(current_chunk)
                split_start_page_numbers.append(chunk_starting_page_number)
                split_start_indices.append(chunk_start_idx)

                # Get the number of sentences that overlap with the next chunk
                num_sentences_to_keep = DocumentSplitter._number_of_sentences_to_keep(
                    sentences=current_chunk, split_length=split_length, split_overlap=split_overlap
                )
                # Set up information for the new chunk
                if num_sentences_to_keep > 0:
                    # Processed sentences are the ones that are not overlapping with the next chunk
                    processed_sentences = current_chunk[:-num_sentences_to_keep]
                    chunk_starting_page_number += sum(sent.count("\f") for sent in processed_sentences)
                    chunk_start_idx += len("".join(processed_sentences))
                    # Next chunk starts with the sentences that were overlapping with the previous chunk
                    current_chunk = current_chunk[-num_sentences_to_keep:]
                    chunk_word_count = sum(len(s.split()) for s in current_chunk)
                else:
                    # Here processed_sentences is the same as current_chunk since there is no overlap
                    chunk_starting_page_number += sum(sent.count("\f") for sent in current_chunk)
                    chunk_start_idx += len("".join(current_chunk))
                    current_chunk = []
                    chunk_word_count = 0

        # Concatenate the sentences together within each split
        text_splits = []
        for split in list_of_splits:
            text = "".join(split)
            if len(text) > 0:
                text_splits.append(text)

        return text_splits, split_start_page_numbers, split_start_indices
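
    # Worked example (hypothetical values): given three sentences of three words each,
    # ["Sentence one here. ", "Sentence two here. ", "Sentence three here."], with
    # split_length=7 and split_overlap=3, the first chunk closes after two sentences
    # (6 words, since adding the third would exceed 7). One trailing sentence is kept as
    # overlap, so the splits are "Sentence one here. Sentence two here. " and
    # "Sentence two here. Sentence three here." with start indices [0, 19].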

    @staticmethod
    def _number_of_sentences_to_keep(sentences: List[str], split_length: int, split_overlap: int) -> int:
        """
        Returns the number of sentences to keep in the next chunk based on the `split_overlap` and `split_length`.

        :param sentences: The list of sentences to split.
        :param split_length: The maximum number of words in each split.
        :param split_overlap: The number of overlapping words in each split.
        :returns: The number of sentences to keep in the next chunk.
        """
        # If the split_overlap is 0, we don't need to keep any sentences
        if split_overlap == 0:
            return 0

        num_sentences_to_keep = 0
        num_words = 0
        # Next overlapping Document should not start exactly the same as the previous one, so we skip the first sentence
        for sent in reversed(sentences[1:]):
            num_words += len(sent.split())
            # If the number of words is larger than the split_length then don't add any more sentences
            if num_words > split_length:
                break
            num_sentences_to_keep += 1
            if num_words > split_overlap:
                break
        return num_sentences_to_keep
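
    # Worked example (hypothetical values): for sentences of 4, 5 and 3 words with
    # split_length=10 and split_overlap=5, the walk over reversed(sentences[1:]) first
    # takes the 3-word sentence (3 words, not yet above the overlap of 5), then the
    # 5-word sentence (8 words, above the overlap) and stops, keeping 2 sentences.


if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library API): split a
    # two-page document by word with overlap and inspect the resulting metadata.
    sample = Document(content="Page one has a few words on it.\fPage two has a few more words.")
    splitter = DocumentSplitter(split_by="word", split_length=6, split_overlap=2)
    for chunk in splitter.run(documents=[sample])["documents"]:
        print(repr(chunk.content), chunk.meta["page_number"], chunk.meta["split_id"])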