haystack/components/preprocessors/document_splitter.py

# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0

from copy import deepcopy
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple

from more_itertools import windowed

from haystack import Document, component, logging
from haystack.components.preprocessors.sentence_tokenizer import Language, SentenceSplitter, nltk_imports
from haystack.core.serialization import default_from_dict, default_to_dict
from haystack.utils import deserialize_callable, serialize_callable

logger = logging.getLogger(__name__)

# Mapping from `split_by` value to the character used for splitting.
# "function" and "sentence" are not character-based, so they have no entry here.
_CHARACTER_SPLIT_BY_MAPPING = {"page": "\f", "passage": "\n\n", "period": ".", "word": " ", "line": "\n"}


@component
class DocumentSplitter:
    """
    Splits long documents into smaller chunks.

    This is a common preprocessing step during indexing. It helps Embedders create meaningful semantic representations
    and prevents exceeding language model context limits.

    The DocumentSplitter is compatible with the following DocumentStores:
    - [Astra](https://docs.haystack.deepset.ai/docs/astradocumentstore)
    - [Chroma](https://docs.haystack.deepset.ai/docs/chromadocumentstore): limited support, overlapping information is
      not stored
    - [Elasticsearch](https://docs.haystack.deepset.ai/docs/elasticsearch-document-store)
    - [OpenSearch](https://docs.haystack.deepset.ai/docs/opensearch-document-store)
    - [Pgvector](https://docs.haystack.deepset.ai/docs/pgvectordocumentstore)
    - [Pinecone](https://docs.haystack.deepset.ai/docs/pinecone-document-store): limited support, overlapping
      information is not stored
    - [Qdrant](https://docs.haystack.deepset.ai/docs/qdrant-document-store)
    - [Weaviate](https://docs.haystack.deepset.ai/docs/weaviatedocumentstore)

    ### Usage example

    ```python
    from haystack import Document
    from haystack.components.preprocessors import DocumentSplitter

    doc = Document(content="Moonlight shimmered softly, wolves howled nearby, night enveloped everything.")

    splitter = DocumentSplitter(split_by="word", split_length=3, split_overlap=0)
    result = splitter.run(documents=[doc])
    ```
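
    Sentence-based splitting needs the NLTK tokenizer, which is loaded in `warm_up()`
    (a minimal sketch continuing the example above):

    ```python
    splitter = DocumentSplitter(split_by="sentence", split_length=2)
    splitter.warm_up()
    result = splitter.run(documents=[doc])
    ```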
    """

    def __init__(  # pylint: disable=too-many-positional-arguments
        self,
        split_by: Literal["function", "page", "passage", "period", "word", "line", "sentence"] = "word",
        split_length: int = 200,
        split_overlap: int = 0,
        split_threshold: int = 0,
        splitting_function: Optional[Callable[[str], List[str]]] = None,
        respect_sentence_boundary: bool = False,
        language: Language = "en",
        use_split_rules: bool = True,
        extend_abbreviations: bool = True,
    ):
        """
68
        Initialize DocumentSplitter.
69

70
        :param split_by: The unit for splitting your documents. Choose from:
71
            - `word` for splitting by spaces (" ")
72
            - `period` for splitting by periods (".")
73
            - `page` for splitting by form feed ("\\f")
74
            - `passage` for splitting by double line breaks ("\\n\\n")
75
            - `line` for splitting each line ("\\n")
76
            - `sentence` for splitting by NLTK sentence tokenizer
77

78
        :param split_length: The maximum number of units in each split.
79
        :param split_overlap: The number of overlapping units for each split.
80
        :param split_threshold: The minimum number of units per split. If a split has fewer units
81
            than the threshold, it's attached to the previous split.
82
        :param splitting_function: Necessary when `split_by` is set to "function".
83
            This is a function which must accept a single `str` as input and return a `list` of `str` as output,
84
            representing the chunks after splitting.
85
        :param respect_sentence_boundary: Choose whether to respect sentence boundaries when splitting by "word".
86
            If True, uses NLTK to detect sentence boundaries, ensuring splits occur only between sentences.
87
        :param language: Choose the language for the NLTK tokenizer. The default is English ("en").
88
        :param use_split_rules: Choose whether to use additional split rules when splitting by `sentence`.
89
        :param extend_abbreviations: Choose whether to extend NLTK's PunktTokenizer abbreviations with a list
90
            of curated abbreviations, if available. This is currently supported for English ("en") and German ("de").
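
        Usage example with a custom splitting function (an illustrative sketch; any callable
        that takes a `str` and returns a `List[str]` works):

        ```python
        splitter = DocumentSplitter(split_by="function", splitting_function=lambda text: text.split(";"))
        ```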
        """

        self.split_by = split_by
        self.split_length = split_length
        self.split_overlap = split_overlap
        self.split_threshold = split_threshold
        self.splitting_function = splitting_function
        self.respect_sentence_boundary = respect_sentence_boundary
        self.language = language
        self.use_split_rules = use_split_rules
        self.extend_abbreviations = extend_abbreviations

        self._init_checks(
            split_by=split_by,
            split_length=split_length,
            split_overlap=split_overlap,
            splitting_function=splitting_function,
            respect_sentence_boundary=respect_sentence_boundary,
        )
        self._use_sentence_splitter = split_by == "sentence" or (respect_sentence_boundary and split_by == "word")
        if self._use_sentence_splitter:
            nltk_imports.check()
            self.sentence_splitter = None

    def _init_checks(
        self,
        *,
        split_by: str,
        split_length: int,
        split_overlap: int,
        splitting_function: Optional[Callable],
        respect_sentence_boundary: bool,
    ) -> None:
        """
        Validates initialization parameters for DocumentSplitter.

        :param split_by: The unit for splitting documents.
        :param split_length: The maximum number of units in each split.
        :param split_overlap: The number of overlapping units for each split.
        :param splitting_function: Custom function for splitting when split_by="function".
        :param respect_sentence_boundary: Whether to respect sentence boundaries when splitting.
        :raises ValueError: If any parameter is invalid.
        """
        valid_split_by = ["function", "page", "passage", "period", "word", "line", "sentence"]
        if split_by not in valid_split_by:
            raise ValueError(f"split_by must be one of {', '.join(valid_split_by)}.")

        if split_by == "function" and splitting_function is None:
            raise ValueError("When 'split_by' is set to 'function', a valid 'splitting_function' must be provided.")

        if split_length <= 0:
            raise ValueError("split_length must be greater than 0.")

        if split_overlap < 0:
            raise ValueError("split_overlap must be greater than or equal to 0.")

        if respect_sentence_boundary and split_by != "word":
            logger.warning(
                "The 'respect_sentence_boundary' option is only supported for `split_by='word'`. "
                "The option `respect_sentence_boundary` will be set to `False`."
            )
            self.respect_sentence_boundary = False

    def warm_up(self):
        """
        Warm up the DocumentSplitter by loading the sentence tokenizer.
        """
        if self._use_sentence_splitter and self.sentence_splitter is None:
            self.sentence_splitter = SentenceSplitter(
                language=self.language,
                use_split_rules=self.use_split_rules,
                extend_abbreviations=self.extend_abbreviations,
                keep_white_spaces=True,
            )

    @component.output_types(documents=List[Document])
    def run(self, documents: List[Document]):
        """
        Split documents into smaller parts.

        Splits documents by the unit expressed in `split_by`, with a length of `split_length`
        and an overlap of `split_overlap`.

        :param documents: The documents to split.
        :returns: A dictionary with the following key:
            - `documents`: List of documents with the split texts. Each document includes:
                - A metadata field `source_id` to track the original document.
                - A metadata field `page_number` to track the original page number.
                - All other metadata copied from the original document.

        :raises TypeError: if the input is not a list of Documents.
        :raises ValueError: if the content of a document is None.
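
        Example of a split document's metadata (illustrative values, assuming the default
        `split_overlap=0`):

        ```python
        result = splitter.run(documents=[doc])
        result["documents"][0].meta
        # {"source_id": "...", "page_number": 1, "split_id": 0, "split_idx_start": 0, ...}
        ```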
        """
        if self._use_sentence_splitter and self.sentence_splitter is None:
            raise RuntimeError(
                "The component DocumentSplitter wasn't warmed up. Run 'warm_up()' before calling 'run()'."
            )

        if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):
            raise TypeError("DocumentSplitter expects a List of Documents as input.")

        split_docs: List[Document] = []
        for doc in documents:
            if doc.content is None:
                raise ValueError(
                    f"DocumentSplitter only works with text documents but content for document ID {doc.id} is None."
                )
            if doc.content == "":
                logger.warning("Document ID {doc_id} has empty content. Skipping this document.", doc_id=doc.id)
                continue

            split_docs += self._split_document(doc)
        return {"documents": split_docs}

    def _split_document(self, doc: Document) -> List[Document]:
        if self.split_by == "sentence" or self.respect_sentence_boundary:
            return self._split_by_nltk_sentence(doc)

        if self.split_by == "function" and self.splitting_function is not None:
            return self._split_by_function(doc)

        return self._split_by_character(doc)

    def _split_by_nltk_sentence(self, doc: Document) -> List[Document]:
        split_docs = []

        result = self.sentence_splitter.split_sentences(doc.content)  # type: ignore # None check is done in run()
        units = [sentence["sentence"] for sentence in result]

        if self.respect_sentence_boundary:
            text_splits, splits_pages, splits_start_idxs = self._concatenate_sentences_based_on_word_amount(
                sentences=units, split_length=self.split_length, split_overlap=self.split_overlap
            )
        else:
            text_splits, splits_pages, splits_start_idxs = self._concatenate_units(
                elements=units,
                split_length=self.split_length,
                split_overlap=self.split_overlap,
                split_threshold=self.split_threshold,
            )
        metadata = deepcopy(doc.meta)
        metadata["source_id"] = doc.id
        split_docs += self._create_docs_from_splits(
            text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata
        )

        return split_docs

    def _split_by_character(self, doc) -> List[Document]:
        split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]
        units = doc.content.split(split_at)
        # Add the delimiter back to all units except the last one
        for i in range(len(units) - 1):
            units[i] += split_at
        text_splits, splits_pages, splits_start_idxs = self._concatenate_units(
            units, self.split_length, self.split_overlap, self.split_threshold
        )
        metadata = deepcopy(doc.meta)
        metadata["source_id"] = doc.id
        return self._create_docs_from_splits(
            text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata
        )

    def _split_by_function(self, doc) -> List[Document]:
        # the check for None is done already in the run method
        splits = self.splitting_function(doc.content)  # type: ignore
        docs: List[Document] = []
        for s in splits:
            meta = deepcopy(doc.meta)
            meta["source_id"] = doc.id
            docs.append(Document(content=s, meta=meta))
        return docs

    def _concatenate_units(
        self, elements: List[str], split_length: int, split_overlap: int, split_threshold: int
    ) -> Tuple[List[str], List[int], List[int]]:
        """
        Concatenates the elements into parts of split_length units.

        Keeps track of the original page number that each element belongs to. If the length of the current units is
        less than the pre-defined `split_threshold`, it does not create a new split. Instead, it concatenates the
        current units with the last split, preventing the creation of excessively small splits.
        """
        text_splits: List[str] = []
        splits_pages: List[int] = []
        splits_start_idxs: List[int] = []
        cur_start_idx = 0
        cur_page = 1
        segments = windowed(elements, n=split_length, step=split_length - split_overlap)

        for seg in segments:
            current_units = [unit for unit in seg if unit is not None]
            txt = "".join(current_units)

            # check if length of current units is below split_threshold
            if len(current_units) < split_threshold and len(text_splits) > 0:
                # concatenate the last split with the current one
                text_splits[-1] += txt

            # NOTE: This condition skips splits whose text is empty
            elif len(txt) > 0:
                text_splits.append(txt)
                splits_pages.append(cur_page)
                splits_start_idxs.append(cur_start_idx)

            processed_units = current_units[: split_length - split_overlap]
            cur_start_idx += len("".join(processed_units))

            if self.split_by == "page":
                num_page_breaks = len(processed_units)
            else:
                num_page_breaks = sum(processed_unit.count("\f") for processed_unit in processed_units)

            cur_page += num_page_breaks

        return text_splits, splits_pages, splits_start_idxs

    def _create_docs_from_splits(
        self, text_splits: List[str], splits_pages: List[int], splits_start_idxs: List[int], meta: Dict[str, Any]
    ) -> List[Document]:
        """
        Creates Document objects from the splits, enriching them with the page number and the original document's
        metadata.
        """
        documents: List[Document] = []

        for i, (txt, split_idx) in enumerate(zip(text_splits, splits_start_idxs)):
            copied_meta = deepcopy(meta)
            copied_meta["page_number"] = splits_pages[i]
            copied_meta["split_id"] = i
            copied_meta["split_idx_start"] = split_idx
            doc = Document(content=txt, meta=copied_meta)
            documents.append(doc)

            if self.split_overlap <= 0:
                continue

            doc.meta["_split_overlap"] = []

            if i == 0:
                continue

            doc_start_idx = splits_start_idxs[i]
            previous_doc = documents[i - 1]
            previous_doc_start_idx = splits_start_idxs[i - 1]
            self._add_split_overlap_information(doc, doc_start_idx, previous_doc, previous_doc_start_idx)

        return documents

    @staticmethod
    def _add_split_overlap_information(
        current_doc: Document, current_doc_start_idx: int, previous_doc: Document, previous_doc_start_idx: int
    ):
        """
        Adds split overlap information to the current and previous Document's meta.

        :param current_doc: The Document that is being split.
        :param current_doc_start_idx: The starting index of the current Document.
        :param previous_doc: The Document that was split before the current Document.
        :param previous_doc_start_idx: The starting index of the previous Document.
        """
        overlapping_range = (current_doc_start_idx - previous_doc_start_idx, len(previous_doc.content))  # type: ignore

        if overlapping_range[0] < overlapping_range[1]:
            overlapping_str = previous_doc.content[overlapping_range[0] : overlapping_range[1]]  # type: ignore

            if current_doc.content.startswith(overlapping_str):  # type: ignore
                # add split overlap information to this Document regarding the previous Document
                current_doc.meta["_split_overlap"].append({"doc_id": previous_doc.id, "range": overlapping_range})

                # add split overlap information to previous Document regarding this Document
                overlapping_range = (0, overlapping_range[1] - overlapping_range[0])
                previous_doc.meta["_split_overlap"].append({"doc_id": current_doc.id, "range": overlapping_range})

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.
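
        Round-trip sketch (the `init_parameters` key follows Haystack's standard component
        serialization format):

        ```python
        splitter = DocumentSplitter(split_by="word", split_length=10)
        data = splitter.to_dict()
        assert data["init_parameters"]["split_length"] == 10
        restored = DocumentSplitter.from_dict(data)
        ```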
        """
        serialized = default_to_dict(
            self,
            split_by=self.split_by,
            split_length=self.split_length,
            split_overlap=self.split_overlap,
            split_threshold=self.split_threshold,
            respect_sentence_boundary=self.respect_sentence_boundary,
            language=self.language,
            use_split_rules=self.use_split_rules,
            extend_abbreviations=self.extend_abbreviations,
        )
        if self.splitting_function:
            serialized["init_parameters"]["splitting_function"] = serialize_callable(self.splitting_function)
        return serialized

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "DocumentSplitter":
        """
        Deserializes the component from a dictionary.
        """
        init_params = data.get("init_parameters", {})

        splitting_function = init_params.get("splitting_function", None)
        if splitting_function:
            init_params["splitting_function"] = deserialize_callable(splitting_function)

        return default_from_dict(cls, data)

    @staticmethod
    def _concatenate_sentences_based_on_word_amount(
        sentences: List[str], split_length: int, split_overlap: int
    ) -> Tuple[List[str], List[int], List[int]]:
        """
        Groups the sentences into chunks of `split_length` words while respecting sentence boundaries.

        This function is only used when splitting by `word` with `respect_sentence_boundary` set to `True`, i.e.,
        with the NLTK sentence tokenizer.

        :param sentences: The list of sentences to split.
        :param split_length: The maximum number of words in each split.
        :param split_overlap: The number of overlapping words in each split.
        :returns: A tuple containing the concatenated sentences, the start page numbers, and the start indices.
        """
        # chunk information
        chunk_word_count = 0
        chunk_starting_page_number = 1
        chunk_start_idx = 0
        current_chunk: List[str] = []
        # output lists
        split_start_page_numbers = []
        list_of_splits: List[List[str]] = []
        split_start_indices = []

        for sentence_idx, sentence in enumerate(sentences):
            current_chunk.append(sentence)
            chunk_word_count += len(sentence.split())
            next_sentence_word_count = (
                len(sentences[sentence_idx + 1].split()) if sentence_idx < len(sentences) - 1 else 0
            )

            # Number of words in the current chunk plus the next sentence is larger than the split_length,
            # or we reached the last sentence
            if (chunk_word_count + next_sentence_word_count) > split_length or sentence_idx == len(sentences) - 1:
                # Save current chunk and start a new one
                list_of_splits.append(current_chunk)
                split_start_page_numbers.append(chunk_starting_page_number)
                split_start_indices.append(chunk_start_idx)

                # Get the number of sentences that overlap with the next chunk
                num_sentences_to_keep = DocumentSplitter._number_of_sentences_to_keep(
                    sentences=current_chunk, split_length=split_length, split_overlap=split_overlap
                )
                # Set up information for the new chunk
                if num_sentences_to_keep > 0:
                    # Processed sentences are the ones that are not overlapping with the next chunk
                    processed_sentences = current_chunk[:-num_sentences_to_keep]
                    chunk_starting_page_number += sum(sent.count("\f") for sent in processed_sentences)
                    chunk_start_idx += len("".join(processed_sentences))
                    # Next chunk starts with the sentences that were overlapping with the previous chunk
                    current_chunk = current_chunk[-num_sentences_to_keep:]
                    chunk_word_count = sum(len(s.split()) for s in current_chunk)
                else:
                    # Here processed_sentences is the same as current_chunk since there is no overlap
                    chunk_starting_page_number += sum(sent.count("\f") for sent in current_chunk)
                    chunk_start_idx += len("".join(current_chunk))
                    current_chunk = []
                    chunk_word_count = 0

        # Concatenate the sentences together within each split
        text_splits = []
        for split in list_of_splits:
            text = "".join(split)
            if len(text) > 0:
                text_splits.append(text)

        return text_splits, split_start_page_numbers, split_start_indices

    @staticmethod
    def _number_of_sentences_to_keep(sentences: List[str], split_length: int, split_overlap: int) -> int:
        """
        Returns the number of sentences to keep in the next chunk based on the `split_overlap` and `split_length`.

        :param sentences: The list of sentences to split.
        :param split_length: The maximum number of words in each split.
        :param split_overlap: The number of overlapping words in each split.
        :returns: The number of sentences to keep in the next chunk.
        """
        # If the split_overlap is 0, we don't need to keep any sentences
        if split_overlap == 0:
            return 0

        num_sentences_to_keep = 0
        num_words = 0
        # Next overlapping Document should not start exactly the same as the previous one, so we skip the first sentence
        for sent in reversed(sentences[1:]):
            num_words += len(sent.split())
            # If the number of words is larger than the split_length then don't add any more sentences
            if num_words > split_length:
                break
            num_sentences_to_keep += 1
            if num_words > split_overlap:
                break
        return num_sentences_to_keep