haystack/components/preprocessors/document_splitter.py
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0

from copy import deepcopy
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple

from more_itertools import windowed

from haystack import Document, component, logging
from haystack.components.preprocessors.sentence_tokenizer import Language, SentenceSplitter, nltk_imports
from haystack.core.serialization import default_from_dict, default_to_dict
from haystack.utils import deserialize_callable, serialize_callable

logger = logging.getLogger(__name__)

# mapping of split_by values to the character they split on; 'function' and 'sentence' don't split by character
_CHARACTER_SPLIT_BY_MAPPING = {"page": "\f", "passage": "\n\n", "period": ".", "word": " ", "line": "\n"}


@component
class DocumentSplitter:
    """
    Splits long documents into smaller chunks.

    This is a common preprocessing step during indexing. It helps Embedders create meaningful semantic representations
    and prevents exceeding language model context limits.

    The DocumentSplitter is compatible with the following DocumentStores:
    - [Astra](https://docs.haystack.deepset.ai/docs/astradocumentstore)
    - [Chroma](https://docs.haystack.deepset.ai/docs/chromadocumentstore) limited support, overlapping information is
      not stored
    - [Elasticsearch](https://docs.haystack.deepset.ai/docs/elasticsearch-document-store)
    - [OpenSearch](https://docs.haystack.deepset.ai/docs/opensearch-document-store)
    - [Pgvector](https://docs.haystack.deepset.ai/docs/pgvectordocumentstore)
    - [Pinecone](https://docs.haystack.deepset.ai/docs/pinecone-document-store) limited support, overlapping
      information is not stored
    - [Qdrant](https://docs.haystack.deepset.ai/docs/qdrant-document-store)
    - [Weaviate](https://docs.haystack.deepset.ai/docs/weaviatedocumentstore)

    ### Usage example

    ```python
    from haystack import Document
    from haystack.components.preprocessors import DocumentSplitter

    doc = Document(content="Moonlight shimmered softly, wolves howled nearby, night enveloped everything.")

    splitter = DocumentSplitter(split_by="word", split_length=3, split_overlap=0)
    result = splitter.run(documents=[doc])
    ```
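
    Splitting with a custom function is also supported. A minimal sketch, not taken from the original
    docstring; the lambda and the sample text are purely illustrative:

    ```python
    splitter = DocumentSplitter(split_by="function", splitting_function=lambda text: text.split("---"))
    result = splitter.run(documents=[Document(content="First part---Second part---Third part")])
    ```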
    """

    def __init__(  # pylint: disable=too-many-positional-arguments
        self,
        split_by: Literal["function", "page", "passage", "period", "word", "line", "sentence"] = "word",
        split_length: int = 200,
        split_overlap: int = 0,
        split_threshold: int = 0,
        splitting_function: Optional[Callable[[str], List[str]]] = None,
        respect_sentence_boundary: bool = False,
        language: Language = "en",
        use_split_rules: bool = True,
        extend_abbreviations: bool = True,
        *,
        skip_empty_documents: bool = True,
    ):
        """
        Initialize DocumentSplitter.

        :param split_by: The unit for splitting your documents. Choose from:
            - `word` for splitting by spaces (" ")
            - `period` for splitting by periods (".")
            - `page` for splitting by form feed ("\\f")
            - `passage` for splitting by double line breaks ("\\n\\n")
            - `line` for splitting each line ("\\n")
            - `sentence` for splitting by NLTK sentence tokenizer
            - `function` for splitting with a custom function passed via `splitting_function`

        :param split_length: The maximum number of units in each split.
        :param split_overlap: The number of overlapping units for each split.
        :param split_threshold: The minimum number of units per split. If a split has fewer units
            than the threshold, it's attached to the previous split.
        :param splitting_function: Necessary when `split_by` is set to "function".
            This is a function which must accept a single `str` as input and return a `list` of `str` as output,
            representing the chunks after splitting.
        :param respect_sentence_boundary: Choose whether to respect sentence boundaries when splitting by "word".
            If True, uses NLTK to detect sentence boundaries, ensuring splits occur only between sentences.
        :param language: Choose the language for the NLTK tokenizer. The default is English ("en").
        :param use_split_rules: Choose whether to use additional split rules when splitting by `sentence`.
        :param extend_abbreviations: Choose whether to extend NLTK's PunktTokenizer abbreviations with a list
            of curated abbreviations, if available. This is currently supported for English ("en") and German ("de").
        :param skip_empty_documents: Choose whether to skip documents with empty content. Default is True.
            Set to False when downstream components in the Pipeline (like LLMDocumentContentExtractor) can extract text
            from non-textual documents.
        """

        self.split_by = split_by
        self.split_length = split_length
        self.split_overlap = split_overlap
        self.split_threshold = split_threshold
        self.splitting_function = splitting_function
        self.respect_sentence_boundary = respect_sentence_boundary
        self.language = language
        self.use_split_rules = use_split_rules
        self.extend_abbreviations = extend_abbreviations
        self.skip_empty_documents = skip_empty_documents

        self._init_checks(
            split_by=split_by,
            split_length=split_length,
            split_overlap=split_overlap,
            splitting_function=splitting_function,
            respect_sentence_boundary=respect_sentence_boundary,
        )
        self._use_sentence_splitter = split_by == "sentence" or (respect_sentence_boundary and split_by == "word")
        if self._use_sentence_splitter:
            nltk_imports.check()
            self.sentence_splitter: Optional[SentenceSplitter] = None

    def _init_checks(
        self,
        *,
        split_by: str,
        split_length: int,
        split_overlap: int,
        splitting_function: Optional[Callable],
        respect_sentence_boundary: bool,
    ) -> None:
        """
        Validates initialization parameters for DocumentSplitter.

        :param split_by: The unit for splitting documents
        :param split_length: The maximum number of units in each split
        :param split_overlap: The number of overlapping units for each split
        :param splitting_function: Custom function for splitting when split_by="function"
        :param respect_sentence_boundary: Whether to respect sentence boundaries when splitting
        :raises ValueError: If any parameter is invalid
        """
        valid_split_by = ["function", "page", "passage", "period", "word", "line", "sentence"]
        if split_by not in valid_split_by:
            raise ValueError(f"split_by must be one of {', '.join(valid_split_by)}.")

        if split_by == "function" and splitting_function is None:
            raise ValueError("When 'split_by' is set to 'function', a valid 'splitting_function' must be provided.")

        if split_length <= 0:
            raise ValueError("split_length must be greater than 0.")

        if split_overlap < 0:
            raise ValueError("split_overlap must be greater than or equal to 0.")

        if respect_sentence_boundary and split_by != "word":
            logger.warning(
                "The 'respect_sentence_boundary' option is only supported for `split_by='word'`. "
                "The option `respect_sentence_boundary` will be set to `False`."
            )
            self.respect_sentence_boundary = False

    def warm_up(self):
        """
        Warm up the DocumentSplitter by loading the sentence tokenizer.
        """
        if self._use_sentence_splitter and self.sentence_splitter is None:
            self.sentence_splitter = SentenceSplitter(
                language=self.language,
                use_split_rules=self.use_split_rules,
                extend_abbreviations=self.extend_abbreviations,
                keep_white_spaces=True,
            )

    @component.output_types(documents=List[Document])
    def run(self, documents: List[Document]):
        """
        Split documents into smaller parts.

        Splits documents by the unit expressed in `split_by`, with a length of `split_length`
        and an overlap of `split_overlap`.

        :param documents: The documents to split.
        :returns: A dictionary with the following key:
            - `documents`: List of documents with the split texts. Each document includes:
                - A metadata field `source_id` to track the original document.
                - A metadata field `page_number` to track the original page number.
                - All other metadata copied from the original document.

        :raises RuntimeError: if the component needs the sentence tokenizer but wasn't warmed up with 'warm_up()'.
        :raises TypeError: if the input is not a list of Documents.
        :raises ValueError: if the content of a document is None.
        """
        if self._use_sentence_splitter and self.sentence_splitter is None:
            raise RuntimeError(
                "The component DocumentSplitter wasn't warmed up. Run 'warm_up()' before calling 'run()'."
            )

        if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):
            raise TypeError("DocumentSplitter expects a List of Documents as input.")

        split_docs: List[Document] = []
        for doc in documents:
            if doc.content is None:
                raise ValueError(
                    f"DocumentSplitter only works with text documents but content for document ID {doc.id} is None."
                )
            if doc.content == "" and self.skip_empty_documents:
                logger.warning("Document ID {doc_id} has empty content. Skipping this document.", doc_id=doc.id)
                continue

            split_docs += self._split_document(doc)
        return {"documents": split_docs}

    def _split_document(self, doc: Document) -> List[Document]:
        if self.split_by == "sentence" or self.respect_sentence_boundary:
            return self._split_by_nltk_sentence(doc)

        if self.split_by == "function" and self.splitting_function is not None:
            return self._split_by_function(doc)

        return self._split_by_character(doc)

    def _split_by_nltk_sentence(self, doc: Document) -> List[Document]:
        split_docs = []

        result = self.sentence_splitter.split_sentences(doc.content)  # type: ignore # None check is done in run()
        units = [sentence["sentence"] for sentence in result]

        if self.respect_sentence_boundary:
            text_splits, splits_pages, splits_start_idxs = self._concatenate_sentences_based_on_word_amount(
                sentences=units, split_length=self.split_length, split_overlap=self.split_overlap
            )
        else:
            text_splits, splits_pages, splits_start_idxs = self._concatenate_units(
                elements=units,
                split_length=self.split_length,
                split_overlap=self.split_overlap,
                split_threshold=self.split_threshold,
            )
        metadata = deepcopy(doc.meta)
        metadata["source_id"] = doc.id
        split_docs += self._create_docs_from_splits(
            text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata
        )

        return split_docs

    def _split_by_character(self, doc) -> List[Document]:
        split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]
        units = doc.content.split(split_at)
        # Add the delimiter back to all units except the last one
        for i in range(len(units) - 1):
            units[i] += split_at
        text_splits, splits_pages, splits_start_idxs = self._concatenate_units(
            units, self.split_length, self.split_overlap, self.split_threshold
        )
        metadata = deepcopy(doc.meta)
        metadata["source_id"] = doc.id
        return self._create_docs_from_splits(
            text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata
        )

    def _split_by_function(self, doc) -> List[Document]:
        # the check for None is done already in the run method
        splits = self.splitting_function(doc.content)  # type: ignore
        docs: List[Document] = []
        for s in splits:
            meta = deepcopy(doc.meta)
            meta["source_id"] = doc.id
            docs.append(Document(content=s, meta=meta))
        return docs

    def _concatenate_units(
        self, elements: List[str], split_length: int, split_overlap: int, split_threshold: int
    ) -> Tuple[List[str], List[int], List[int]]:
        """
        Concatenates the elements into parts of split_length units.

        Keeps track of the original page number that each element belongs to. If the length of the current units is
        less than the pre-defined `split_threshold`, it does not create a new split. Instead, it concatenates the
        current units with the last split, preventing the creation of excessively small splits.
        """

        text_splits: List[str] = []
        splits_pages: List[int] = []
        splits_start_idxs: List[int] = []
        cur_start_idx = 0
        cur_page = 1
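        # Illustration (added comment): with e.g. split_length=3 and split_overlap=1, windowed() yields
        # (u0, u1, u2), (u2, u3, u4), ... i.e. windows of split_length units whose start advances by
        # split_length - split_overlap units at each step.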
        segments = windowed(elements, n=split_length, step=split_length - split_overlap)

        for seg in segments:
            current_units = [unit for unit in seg if unit is not None]
            txt = "".join(current_units)

            # check if length of current units is below split_threshold
            if len(current_units) < split_threshold and len(text_splits) > 0:
                # concatenate the last split with the current one
                text_splits[-1] += txt

            # NOTE: If skip_empty_documents is True, empty splits (which would become documents with content="")
            # are not appended
            elif not self.skip_empty_documents or len(txt) > 0:
                text_splits.append(txt)
                splits_pages.append(cur_page)
                splits_start_idxs.append(cur_start_idx)

            processed_units = current_units[: split_length - split_overlap]
            cur_start_idx += len("".join(processed_units))

            if self.split_by == "page":
                num_page_breaks = len(processed_units)
            else:
                num_page_breaks = sum(processed_unit.count("\f") for processed_unit in processed_units)

            cur_page += num_page_breaks

        return text_splits, splits_pages, splits_start_idxs

    def _create_docs_from_splits(
        self, text_splits: List[str], splits_pages: List[int], splits_start_idxs: List[int], meta: Dict[str, Any]
    ) -> List[Document]:
        """
        Creates Document objects from splits, enriching them with page number and the original document's metadata.
        """
        documents: List[Document] = []

        for i, (txt, split_idx) in enumerate(zip(text_splits, splits_start_idxs)):
            copied_meta = deepcopy(meta)
            copied_meta["page_number"] = splits_pages[i]
            copied_meta["split_id"] = i
            copied_meta["split_idx_start"] = split_idx
            doc = Document(content=txt, meta=copied_meta)
            documents.append(doc)

            if self.split_overlap <= 0:
                continue

            doc.meta["_split_overlap"] = []

            if i == 0:
                continue

            doc_start_idx = splits_start_idxs[i]
            previous_doc = documents[i - 1]
            previous_doc_start_idx = splits_start_idxs[i - 1]
            self._add_split_overlap_information(doc, doc_start_idx, previous_doc, previous_doc_start_idx)

        return documents

    @staticmethod
    def _add_split_overlap_information(
        current_doc: Document, current_doc_start_idx: int, previous_doc: Document, previous_doc_start_idx: int
    ):
        """
        Adds split overlap information to the current and previous Document's meta.

        :param current_doc: The Document that is being split.
        :param current_doc_start_idx: The starting index of the current Document.
        :param previous_doc: The Document that was split before the current Document.
        :param previous_doc_start_idx: The starting index of the previous Document.
        """
        overlapping_range = (current_doc_start_idx - previous_doc_start_idx, len(previous_doc.content))  # type: ignore

        if overlapping_range[0] < overlapping_range[1]:
            overlapping_str = previous_doc.content[overlapping_range[0] : overlapping_range[1]]  # type: ignore

            if current_doc.content.startswith(overlapping_str):  # type: ignore
                # add split overlap information to this Document regarding the previous Document
                current_doc.meta["_split_overlap"].append({"doc_id": previous_doc.id, "range": overlapping_range})

                # add split overlap information to previous Document regarding this Document
                overlapping_range = (0, overlapping_range[1] - overlapping_range[0])
                previous_doc.meta["_split_overlap"].append({"doc_id": current_doc.id, "range": overlapping_range})

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.
        """
        serialized = default_to_dict(
            self,
            split_by=self.split_by,
            split_length=self.split_length,
            split_overlap=self.split_overlap,
            split_threshold=self.split_threshold,
            respect_sentence_boundary=self.respect_sentence_boundary,
            language=self.language,
            use_split_rules=self.use_split_rules,
            extend_abbreviations=self.extend_abbreviations,
            skip_empty_documents=self.skip_empty_documents,
        )
        if self.splitting_function:
            serialized["init_parameters"]["splitting_function"] = serialize_callable(self.splitting_function)
        return serialized

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "DocumentSplitter":
        """
        Deserializes the component from a dictionary.
        """
        init_params = data.get("init_parameters", {})

        splitting_function = init_params.get("splitting_function", None)
        if splitting_function:
            init_params["splitting_function"] = deserialize_callable(splitting_function)

        return default_from_dict(cls, data)

    @staticmethod
    def _concatenate_sentences_based_on_word_amount(
        sentences: List[str], split_length: int, split_overlap: int
    ) -> Tuple[List[str], List[int], List[int]]:
        """
        Groups the sentences into chunks of `split_length` words while respecting sentence boundaries.

        This function is only used when splitting by `word` and `respect_sentence_boundary` is set to `True`, i.e.
        with the NLTK sentence tokenizer.

        :param sentences: The list of sentences to split.
        :param split_length: The maximum number of words in each split.
        :param split_overlap: The number of overlapping words in each split.
        :returns: A tuple containing the concatenated sentences, the start page numbers, and the start indices.
        """
        # chunk information
        chunk_word_count = 0
        chunk_starting_page_number = 1
        chunk_start_idx = 0
        current_chunk: List[str] = []
        # output lists
        split_start_page_numbers = []
        list_of_splits: List[List[str]] = []
        split_start_indices = []

        for sentence_idx, sentence in enumerate(sentences):
            current_chunk.append(sentence)
            chunk_word_count += len(sentence.split())
            next_sentence_word_count = (
                len(sentences[sentence_idx + 1].split()) if sentence_idx < len(sentences) - 1 else 0
            )

            # Number of words in the current chunk plus the next sentence is larger than the split_length,
            # or we reached the last sentence
            if (chunk_word_count + next_sentence_word_count) > split_length or sentence_idx == len(sentences) - 1:
                # Save current chunk and start a new one
                list_of_splits.append(current_chunk)
                split_start_page_numbers.append(chunk_starting_page_number)
                split_start_indices.append(chunk_start_idx)

                # Get the number of sentences that overlap with the next chunk
                num_sentences_to_keep = DocumentSplitter._number_of_sentences_to_keep(
                    sentences=current_chunk, split_length=split_length, split_overlap=split_overlap
                )
                # Set up information for the new chunk
                if num_sentences_to_keep > 0:
                    # Processed sentences are the ones that are not overlapping with the next chunk
                    processed_sentences = current_chunk[:-num_sentences_to_keep]
                    chunk_starting_page_number += sum(sent.count("\f") for sent in processed_sentences)
                    chunk_start_idx += len("".join(processed_sentences))
                    # Next chunk starts with the sentences that were overlapping with the previous chunk
                    current_chunk = current_chunk[-num_sentences_to_keep:]
                    chunk_word_count = sum(len(s.split()) for s in current_chunk)
                else:
                    # Here processed_sentences is the same as current_chunk since there is no overlap
                    chunk_starting_page_number += sum(sent.count("\f") for sent in current_chunk)
                    chunk_start_idx += len("".join(current_chunk))
                    current_chunk = []
                    chunk_word_count = 0

        # Concatenate the sentences together within each split
        text_splits = []
        for split in list_of_splits:
            text = "".join(split)
            if len(text) > 0:
                text_splits.append(text)

        return text_splits, split_start_page_numbers, split_start_indices

    @staticmethod
    def _number_of_sentences_to_keep(sentences: List[str], split_length: int, split_overlap: int) -> int:
        """
        Returns the number of sentences to keep in the next chunk based on the `split_overlap` and `split_length`.

        :param sentences: The list of sentences to split.
        :param split_length: The maximum number of words in each split.
        :param split_overlap: The number of overlapping words in each split.
        :returns: The number of sentences to keep in the next chunk.
        """
        # If the split_overlap is 0, we don't need to keep any sentences
        if split_overlap == 0:
            return 0

        num_sentences_to_keep = 0
        num_words = 0
        # Next overlapping Document should not start exactly the same as the previous one, so we skip the first sentence
        for sent in reversed(sentences[1:]):
            num_words += len(sent.split())
            # If the number of words is larger than the split_length then don't add any more sentences
            if num_words > split_length:
                break
            num_sentences_to_keep += 1
            if num_words > split_overlap:
                break
        return num_sentences_to_keep