
deepset-ai / haystack, build 17760482097

16 Sep 2025 08:58AM UTC. Coverage: 92.059% (+0.01%) from 92.047%.

Pull Request #9754: feat: support structured outputs in `OpenAIChatGenerator`
Merge 0c0073114 into e3d4e9e94 (committed via github web-flow)

12996 of 14117 relevant lines covered (92.06%), 0.92 hits per line.

Source file: haystack/components/generators/chat/openai.py (96.7% covered)
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0

import json
import os
from datetime import datetime
from typing import Any, Optional, Union

from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
from openai.lib._pydantic import to_strict_json_schema
from openai.types.chat import (
    ChatCompletion,
    ChatCompletionChunk,
    ChatCompletionMessage,
    ChatCompletionMessageCustomToolCall,
    ParsedChatCompletion,
    ParsedChatCompletionMessage,
)
from openai.types.chat.chat_completion import Choice
from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice
from pydantic import BaseModel

from haystack import component, default_from_dict, default_to_dict, logging
from haystack.components.generators.utils import _convert_streaming_chunks_to_chat_message
from haystack.dataclasses import (
    AsyncStreamingCallbackT,
    ChatMessage,
    ComponentInfo,
    FinishReason,
    StreamingCallbackT,
    StreamingChunk,
    SyncStreamingCallbackT,
    ToolCall,
    ToolCallDelta,
    select_streaming_callback,
)
from haystack.tools import (
    Tool,
    Toolset,
    _check_duplicate_tool_names,
    deserialize_tools_or_toolset_inplace,
    serialize_tools_or_toolset,
)
from haystack.utils import Secret, deserialize_callable, deserialize_secrets_inplace, serialize_callable
from haystack.utils.http_client import init_http_client

logger = logging.getLogger(__name__)

@component
class OpenAIChatGenerator:
    """
    Completes chats using OpenAI's large language models (LLMs).

    It works with the gpt-4 and o-series models and supports streaming responses
    from the OpenAI API. It uses the [ChatMessage](https://docs.haystack.deepset.ai/docs/chatmessage)
    format for input and output.

    You can customize how the text is generated by passing parameters to the
    OpenAI API. Use the `**generation_kwargs` argument when you initialize
    the component or when you run it. Any parameter that works with
    `openai.ChatCompletion.create` works here too.

    For details on OpenAI API parameters, see the
    [OpenAI documentation](https://platform.openai.com/docs/api-reference/chat).

    ### Usage example

    ```python
    from haystack.components.generators.chat import OpenAIChatGenerator
    from haystack.dataclasses import ChatMessage

    messages = [ChatMessage.from_user("What's Natural Language Processing?")]

    client = OpenAIChatGenerator()
    response = client.run(messages)
    print(response)
    ```
    Output:
    ```
    {'replies':
        [ChatMessage(_role=<ChatRole.ASSISTANT: 'assistant'>, _content=
        [TextContent(text="Natural Language Processing (NLP) is a branch of artificial intelligence
            that focuses on enabling computers to understand, interpret, and generate human language in
            a way that is meaningful and useful.")],
         _name=None,
         _meta={'model': 'gpt-4o-mini', 'index': 0, 'finish_reason': 'stop',
         'usage': {'prompt_tokens': 15, 'completion_tokens': 36, 'total_tokens': 51}})
        ]
    }
    ```
    """

    def __init__(  # pylint: disable=too-many-positional-arguments
        self,
        api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
        model: str = "gpt-4o-mini",
        streaming_callback: Optional[StreamingCallbackT] = None,
        api_base_url: Optional[str] = None,
        organization: Optional[str] = None,
        generation_kwargs: Optional[dict[str, Any]] = None,
        timeout: Optional[float] = None,
        max_retries: Optional[int] = None,
        tools: Optional[Union[list[Tool], Toolset]] = None,
        tools_strict: bool = False,
        http_client_kwargs: Optional[dict[str, Any]] = None,
    ):
        """
        Creates an instance of OpenAIChatGenerator. Unless specified otherwise in `model`, uses OpenAI's gpt-4o-mini.

        Before initializing the component, you can set the 'OPENAI_TIMEOUT' and 'OPENAI_MAX_RETRIES'
        environment variables to override the `timeout` and `max_retries` parameters respectively
        in the OpenAI client.

        :param api_key: The OpenAI API key.
            You can set it with the `OPENAI_API_KEY` environment variable or pass it with this parameter
            during initialization.
        :param model: The name of the model to use.
        :param streaming_callback: A callback function that is called when a new token is received from the stream.
            The callback function accepts a [StreamingChunk](https://docs.haystack.deepset.ai/docs/data-classes#streamingchunk)
            as an argument.
        :param api_base_url: An optional base URL.
        :param organization: Your organization ID, defaults to `None`. See
            [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization).
        :param generation_kwargs: Other parameters to use for the model. These parameters are sent directly to
            the OpenAI endpoint. See the OpenAI [documentation](https://platform.openai.com/docs/api-reference/chat) for
            more details.
            Some of the supported parameters:
            - `max_tokens`: The maximum number of tokens the output text can have.
            - `temperature`: The sampling temperature to use. Higher values mean the model takes more risks.
                Try 0.9 for more creative applications and 0 (argmax sampling) for ones with a well-defined answer.
            - `top_p`: An alternative to sampling with temperature, called nucleus sampling, where the model
                considers the results of the tokens with top_p probability mass. For example, 0.1 means only the tokens
                comprising the top 10% probability mass are considered.
            - `n`: How many completions to generate for each prompt. For example, if the LLM gets 3 prompts and n is 2,
                it generates two completions for each of the three prompts, ending up with 6 completions in total.
            - `stop`: One or more sequences after which the LLM should stop generating tokens.
            - `presence_penalty`: The penalty to apply if a token is already present in the text at all. Bigger values
                mean the model is less likely to repeat the same token.
            - `frequency_penalty`: The penalty to apply based on how often a token has already been generated in the
                text. Bigger values mean the model is less likely to repeat the same token.
            - `logit_bias`: Adds a logit bias to specific tokens. The keys of the dictionary are tokens, and the
                values are the bias to add to that token.
            - `response_format`: A JSON schema or a Pydantic model that enforces the structure of the model's response.
                If provided, the output is always validated against this
                format (unless the model returns a tool call).
                For details, see the [OpenAI Structured Outputs documentation](https://platform.openai.com/docs/guides/structured-outputs).
                Notes:
                - This parameter accepts Pydantic models and JSON schemas for the latest models, starting from GPT-4o.
                  Older models only support a basic version of structured outputs through `{"type": "json_object"}`.
                  For detailed information on JSON mode, see the [OpenAI Structured Outputs documentation](https://platform.openai.com/docs/guides/structured-outputs#json-mode).
                - For structured outputs with streaming,
                  the `response_format` must be a JSON schema and not a Pydantic model.
        :param timeout:
            Timeout for OpenAI client calls. If not set, it defaults to either the
            `OPENAI_TIMEOUT` environment variable or 30 seconds.
        :param max_retries:
            Maximum number of retries to contact OpenAI after an internal error.
            If not set, it defaults to either the `OPENAI_MAX_RETRIES` environment variable or 5.
        :param tools:
            A list of tools or a Toolset for which the model can prepare calls. This parameter can accept either a
            list of `Tool` objects or a `Toolset` instance.
        :param tools_strict:
            Whether to enable strict schema adherence for tool calls. If set to `True`, the model follows exactly
            the schema provided in the `parameters` field of the tool definition, but this may increase latency.
        :param http_client_kwargs:
            A dictionary of keyword arguments to configure a custom `httpx.Client` or `httpx.AsyncClient`.
            For more information, see the [HTTPX documentation](https://www.python-httpx.org/api/#client).
        """
        self.api_key = api_key
        self.model = model
        self.generation_kwargs = generation_kwargs or {}
        self.streaming_callback = streaming_callback
        self.api_base_url = api_base_url
        self.organization = organization
        self.timeout = timeout
        self.max_retries = max_retries
        self.tools = tools  # Store tools as-is, whether it's a list or a Toolset
        self.tools_strict = tools_strict
        self.http_client_kwargs = http_client_kwargs
        # Check for duplicate tool names
        _check_duplicate_tool_names(list(self.tools or []))

        if timeout is None:
            timeout = float(os.environ.get("OPENAI_TIMEOUT", "30.0"))
        if max_retries is None:
            max_retries = int(os.environ.get("OPENAI_MAX_RETRIES", "5"))

        client_kwargs: dict[str, Any] = {
            "api_key": api_key.resolve_value(),
            "organization": organization,
            "base_url": api_base_url,
            "timeout": timeout,
            "max_retries": max_retries,
        }

        self.client = OpenAI(http_client=init_http_client(self.http_client_kwargs, async_client=False), **client_kwargs)
        self.async_client = AsyncOpenAI(
            http_client=init_http_client(self.http_client_kwargs, async_client=True), **client_kwargs
        )

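    # --- Illustrative sketch (not part of the original module) ---
    # A hedged example of the client-level knobs described in the docstring above.
    # It assumes OPENAI_API_KEY is set in the environment; the values are arbitrary.
    #
    #     generator = OpenAIChatGenerator(
    #         model="gpt-4o-mini",
    #         timeout=60.0,      # takes precedence over the OPENAI_TIMEOUT env var
    #         max_retries=2,     # takes precedence over the OPENAI_MAX_RETRIES env var
    #         generation_kwargs={"temperature": 0.2, "max_tokens": 256},
    #     )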
    def _get_telemetry_data(self) -> dict[str, Any]:
        """
        Data that is sent to Posthog for usage analytics.
        """
        return {"model": self.model}

    def to_dict(self) -> dict[str, Any]:
        """
        Serialize this component to a dictionary.

        :returns:
            The serialized component as a dictionary.
        """
        callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None
        generation_kwargs = self.generation_kwargs.copy()
        response_format = generation_kwargs.get("response_format")

        # If the response format is a Pydantic model, it's converted to OpenAI's JSON schema format.
        # If it's already a JSON schema, it's left as-is.
        if response_format and issubclass(response_format, BaseModel):
            json_schema = {
                "type": "json_schema",
                "json_schema": {
                    "name": response_format.__name__,
                    "strict": True,
                    "schema": to_strict_json_schema(response_format),
                },
            }
            generation_kwargs["response_format"] = json_schema

        return default_to_dict(
            self,
            model=self.model,
            streaming_callback=callback_name,
            api_base_url=self.api_base_url,
            organization=self.organization,
            generation_kwargs=generation_kwargs,
            api_key=self.api_key.to_dict(),
            timeout=self.timeout,
            max_retries=self.max_retries,
            tools=serialize_tools_or_toolset(self.tools),
            tools_strict=self.tools_strict,
            http_client_kwargs=self.http_client_kwargs,
        )

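    # --- Illustrative sketch (not part of the original module) ---
    # How a Pydantic `response_format` passes through `to_dict`: the model class is
    # replaced by a strict JSON schema so the component stays fully serializable.
    # `CityInfo` is a hypothetical model used only for illustration.
    #
    #     from pydantic import BaseModel
    #
    #     class CityInfo(BaseModel):
    #         name: str
    #         population: int
    #
    #     generator = OpenAIChatGenerator(generation_kwargs={"response_format": CityInfo})
    #     data = generator.to_dict()
    #     # data["init_parameters"]["generation_kwargs"]["response_format"] is now
    #     # {"type": "json_schema", "json_schema": {"name": "CityInfo", "strict": True, "schema": {...}}}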
    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "OpenAIChatGenerator":
        """
        Deserialize this component from a dictionary.

        :param data: The dictionary representation of this component.
        :returns:
            The deserialized component instance.
        """
        deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
        deserialize_tools_or_toolset_inplace(data["init_parameters"], key="tools")
        init_params = data.get("init_parameters", {})
        serialized_callback_handler = init_params.get("streaming_callback")

        if serialized_callback_handler:
            data["init_parameters"]["streaming_callback"] = deserialize_callable(serialized_callback_handler)
        return default_from_dict(cls, data)

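    # --- Illustrative sketch (not part of the original module) ---
    # A minimal serde round trip, assuming OPENAI_API_KEY is set so the secret can
    # be resolved when the component is rebuilt:
    #
    #     generator = OpenAIChatGenerator(model="gpt-4o-mini")
    #     restored = OpenAIChatGenerator.from_dict(generator.to_dict())
    #     assert restored.model == "gpt-4o-mini"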
    @component.output_types(replies=list[ChatMessage])
    def run(
        self,
        messages: list[ChatMessage],
        streaming_callback: Optional[StreamingCallbackT] = None,
        generation_kwargs: Optional[dict[str, Any]] = None,
        *,
        tools: Optional[Union[list[Tool], Toolset]] = None,
        tools_strict: Optional[bool] = None,
    ):
        """
        Invokes chat completion based on the provided messages and generation parameters.

        :param messages:
            A list of ChatMessage instances representing the input messages.
        :param streaming_callback:
            A callback function that is called when a new token is received from the stream.
        :param generation_kwargs:
            Additional keyword arguments for text generation. These parameters will
            override the parameters passed during component initialization.
            For details on OpenAI API parameters, see [OpenAI documentation](https://platform.openai.com/docs/api-reference/chat/create).
        :param tools:
            A list of tools or a Toolset for which the model can prepare calls. If set, it will override the
            `tools` parameter set during component initialization. This parameter can accept either a list of
            `Tool` objects or a `Toolset` instance.
        :param tools_strict:
            Whether to enable strict schema adherence for tool calls. If set to `True`, the model will follow exactly
            the schema provided in the `parameters` field of the tool definition, but this may increase latency.
            If set, it will override the `tools_strict` parameter set during component initialization.

        :returns:
            A dictionary with the following key:
            - `replies`: A list containing the generated responses as ChatMessage instances.
        """
        if len(messages) == 0:
            return {"replies": []}

        streaming_callback = select_streaming_callback(
            init_callback=self.streaming_callback, runtime_callback=streaming_callback, requires_async=False
        )
        chat_completion: Union[Stream[ChatCompletionChunk], ChatCompletion, ParsedChatCompletion]

        api_args = self._prepare_api_call(
            messages=messages,
            streaming_callback=streaming_callback,
            generation_kwargs=generation_kwargs,
            tools=tools,
            tools_strict=tools_strict,
        )
        openai_endpoint = api_args.pop("openai_endpoint")
        openai_endpoint_method = getattr(self.client.chat.completions, openai_endpoint)
        chat_completion = openai_endpoint_method(**api_args)

        if streaming_callback is not None:
            completions = self._handle_stream_response(
                # we cannot check isinstance(chat_completion, Stream) because some observability tools wrap Stream
                # and return a different type. See https://github.com/deepset-ai/haystack/issues/9014.
                chat_completion,  # type: ignore
                streaming_callback,
            )
        else:
            assert isinstance(chat_completion, ChatCompletion), "Unexpected response type for non-streaming request."
            completions = [
                _convert_chat_completion_to_chat_message(chat_completion, choice) for choice in chat_completion.choices
            ]

        # before returning, do post-processing of the completions
        for message in completions:
            _check_finish_reason(message.meta)

        return {"replies": completions}

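    # --- Illustrative sketch (not part of the original module) ---
    # Streaming usage with Haystack's `print_streaming_chunk` helper; any callable
    # accepting a StreamingChunk works. Assumes OPENAI_API_KEY is set.
    #
    #     from haystack.components.generators.utils import print_streaming_chunk
    #
    #     generator = OpenAIChatGenerator(streaming_callback=print_streaming_chunk)
    #     result = generator.run([ChatMessage.from_user("Write a haiku about the sea.")])
    #     print(result["replies"][0].text)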
    @component.output_types(replies=list[ChatMessage])
    async def run_async(
        self,
        messages: list[ChatMessage],
        streaming_callback: Optional[StreamingCallbackT] = None,
        generation_kwargs: Optional[dict[str, Any]] = None,
        *,
        tools: Optional[Union[list[Tool], Toolset]] = None,
        tools_strict: Optional[bool] = None,
    ):
        """
        Asynchronously invokes chat completion based on the provided messages and generation parameters.

        This is the asynchronous version of the `run` method. It has the same parameters and return values
        but can be used with `await` in async code.

        :param messages:
            A list of ChatMessage instances representing the input messages.
        :param streaming_callback:
            A callback function that is called when a new token is received from the stream.
            Must be a coroutine.
        :param generation_kwargs:
            Additional keyword arguments for text generation. These parameters will
            override the parameters passed during component initialization.
            For details on OpenAI API parameters, see [OpenAI documentation](https://platform.openai.com/docs/api-reference/chat/create).
        :param tools:
            A list of tools or a Toolset for which the model can prepare calls. If set, it will override the
            `tools` parameter set during component initialization. This parameter can accept either a list of
            `Tool` objects or a `Toolset` instance.
        :param tools_strict:
            Whether to enable strict schema adherence for tool calls. If set to `True`, the model will follow exactly
            the schema provided in the `parameters` field of the tool definition, but this may increase latency.
            If set, it will override the `tools_strict` parameter set during component initialization.

        :returns:
            A dictionary with the following key:
            - `replies`: A list containing the generated responses as ChatMessage instances.
        """
        # validate and select the streaming callback
        streaming_callback = select_streaming_callback(
            init_callback=self.streaming_callback, runtime_callback=streaming_callback, requires_async=True
        )
        chat_completion: Union[AsyncStream[ChatCompletionChunk], ChatCompletion, ParsedChatCompletion]

        if len(messages) == 0:
            return {"replies": []}

        api_args = self._prepare_api_call(
            messages=messages,
            streaming_callback=streaming_callback,
            generation_kwargs=generation_kwargs,
            tools=tools,
            tools_strict=tools_strict,
        )

        openai_endpoint = api_args.pop("openai_endpoint")
        openai_endpoint_method = getattr(self.async_client.chat.completions, openai_endpoint)
        chat_completion = await openai_endpoint_method(**api_args)

        if streaming_callback is not None:
            completions = await self._handle_async_stream_response(
                # we cannot check isinstance(chat_completion, AsyncStream) because some observability tools wrap
                # AsyncStream and return a different type. See https://github.com/deepset-ai/haystack/issues/9014.
                chat_completion,  # type: ignore
                streaming_callback,
            )
        else:
            assert isinstance(chat_completion, ChatCompletion), "Unexpected response type for non-streaming request."
            completions = [
                _convert_chat_completion_to_chat_message(chat_completion, choice) for choice in chat_completion.choices
            ]

        # before returning, do post-processing of the completions
        for message in completions:
            _check_finish_reason(message.meta)

        return {"replies": completions}

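    # --- Illustrative sketch (not part of the original module) ---
    # Async usage with a coroutine callback, assuming OPENAI_API_KEY is set:
    #
    #     import asyncio
    #
    #     async def on_chunk(chunk):
    #         print(chunk.content, end="", flush=True)
    #
    #     async def main():
    #         generator = OpenAIChatGenerator()
    #         result = await generator.run_async(
    #             [ChatMessage.from_user("Name three prime numbers.")],
    #             streaming_callback=on_chunk,
    #         )
    #         print(result["replies"][0].text)
    #
    #     asyncio.run(main())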
    def _prepare_api_call(  # noqa: PLR0913
        self,
        *,
        messages: list[ChatMessage],
        streaming_callback: Optional[StreamingCallbackT] = None,
        generation_kwargs: Optional[dict[str, Any]] = None,
        tools: Optional[Union[list[Tool], Toolset]] = None,
        tools_strict: Optional[bool] = None,
    ) -> dict[str, Any]:
        # update generation kwargs by merging with the generation kwargs passed to the run method
        generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}

        is_streaming = streaming_callback is not None
        num_responses = generation_kwargs.pop("n", 1)

        if is_streaming and num_responses > 1:
            raise ValueError("Cannot stream multiple responses, please set n=1.")
        response_format = generation_kwargs.pop("response_format", None)

        # adapt ChatMessage(s) to the format expected by the OpenAI API
        openai_formatted_messages = [message.to_openai_dict_format() for message in messages]

        tools = tools or self.tools
        if isinstance(tools, Toolset):
            tools = list(tools)
        tools_strict = tools_strict if tools_strict is not None else self.tools_strict
        _check_duplicate_tool_names(tools)

        openai_tools = {}
        if tools:
            tool_definitions = []
            for t in tools:
                function_spec = {**t.tool_spec}
                if tools_strict:
                    function_spec["strict"] = True
                    function_spec["parameters"]["additionalProperties"] = False
                tool_definitions.append({"type": "function", "function": function_spec})
            openai_tools = {"tools": tool_definitions}

        base_args = {
            "model": self.model,
            "messages": openai_formatted_messages,
            "n": num_responses,
            **openai_tools,
            **generation_kwargs,
        }

        if response_format and not is_streaming:
            # for structured outputs without streaming, we use OpenAI's parse endpoint
            # Note: `stream` cannot be passed to chat.completions.parse
            # we pass a key `openai_endpoint` as a hint to the run method to use the parse endpoint
            # this key will be removed before the API call is made
            return {**base_args, "response_format": response_format, "openai_endpoint": "parse"}

        # for all other cases, including structured outputs with streaming, we use OpenAI's create endpoint
        # we pass a key `openai_endpoint` as a hint to the run method to use the create endpoint
        # this key will be removed before the API call is made
        return {
            **base_args,
            "stream": streaming_callback is not None,
            "response_format": response_format,
            "openai_endpoint": "create",
        }

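    # --- Illustrative sketch (not part of the original module) ---
    # Endpoint selection in `_prepare_api_call`, summarized:
    #   response_format set, no streaming  -> chat.completions.parse (no `stream` key)
    #   everything else                    -> chat.completions.create (`stream` mirrors the callback)
    # For illustration only (this calls a private helper; values are hypothetical):
    #
    #     args = generator._prepare_api_call(
    #         messages=[ChatMessage.from_user("Hi")],
    #         generation_kwargs={"response_format": {"type": "json_object"}},
    #     )
    #     assert args["openai_endpoint"] == "parse"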
    def _handle_stream_response(self, chat_completion: Stream, callback: SyncStreamingCallbackT) -> list[ChatMessage]:
        component_info = ComponentInfo.from_component(self)
        chunks: list[StreamingChunk] = []
        for chunk in chat_completion:  # pylint: disable=not-an-iterable
            assert len(chunk.choices) <= 1, "Streaming responses should have at most one choice."
            chunk_delta = _convert_chat_completion_chunk_to_streaming_chunk(
                chunk=chunk, previous_chunks=chunks, component_info=component_info
            )
            chunks.append(chunk_delta)
            callback(chunk_delta)
        return [_convert_streaming_chunks_to_chat_message(chunks=chunks)]

    async def _handle_async_stream_response(
        self, chat_completion: AsyncStream, callback: AsyncStreamingCallbackT
    ) -> list[ChatMessage]:
        component_info = ComponentInfo.from_component(self)
        chunks: list[StreamingChunk] = []
        async for chunk in chat_completion:  # pylint: disable=not-an-iterable
            assert len(chunk.choices) <= 1, "Streaming responses should have at most one choice."
            chunk_delta = _convert_chat_completion_chunk_to_streaming_chunk(
                chunk=chunk, previous_chunks=chunks, component_info=component_info
            )
            chunks.append(chunk_delta)
            await callback(chunk_delta)
        return [_convert_streaming_chunks_to_chat_message(chunks=chunks)]

def _check_finish_reason(meta: dict[str, Any]) -> None:
    if meta["finish_reason"] == "length":
        logger.warning(
            "The completion for index {index} has been truncated before reaching a natural stopping point. "
            "Increase the max_tokens parameter to allow for longer completions.",
            index=meta["index"],
            finish_reason=meta["finish_reason"],
        )
    if meta["finish_reason"] == "content_filter":
        logger.warning(
            "The completion for index {index} has been truncated due to the content filter.",
            index=meta["index"],
            finish_reason=meta["finish_reason"],
        )

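# --- Illustrative sketch (not part of the original module) ---
def _example_check_finish_reason() -> None:
    """Hedged demo: a meta dict with finish_reason == "length" triggers the truncation warning above."""
    _check_finish_reason({"finish_reason": "length", "index": 0})  # logs a warning
    _check_finish_reason({"finish_reason": "stop", "index": 0})  # logs nothing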

def _convert_chat_completion_to_chat_message(
    completion: Union[ChatCompletion, ParsedChatCompletion], choice: Choice
) -> ChatMessage:
    """
    Converts the non-streaming response from the OpenAI API to a ChatMessage.

    :param completion: The completion returned by the OpenAI API.
    :param choice: The choice returned by the OpenAI API.
    :return: The ChatMessage.
    """
    message: Union[ChatCompletionMessage, ParsedChatCompletionMessage] = choice.message
    text = message.content
    tool_calls = []
    if message.tool_calls:
        # we currently only support function tools (not custom tools)
        # https://platform.openai.com/docs/guides/function-calling#custom-tools
        openai_tool_calls = [tc for tc in message.tool_calls if not isinstance(tc, ChatCompletionMessageCustomToolCall)]
        for openai_tc in openai_tool_calls:
            arguments_str = openai_tc.function.arguments
            try:
                arguments = json.loads(arguments_str)
                tool_calls.append(ToolCall(id=openai_tc.id, tool_name=openai_tc.function.name, arguments=arguments))
            except json.JSONDecodeError:
                logger.warning(
                    "OpenAI returned a malformed JSON string for tool call arguments. This tool call "
                    "will be skipped. To always generate a valid JSON, set `tools_strict` to `True`. "
                    "Tool call ID: {_id}, Tool name: {_name}, Arguments: {_arguments}",
                    _id=openai_tc.id,
                    _name=openai_tc.function.name,
                    _arguments=arguments_str,
                )

    chat_message = ChatMessage.from_assistant(
        text=text,
        tool_calls=tool_calls,
        meta={
            "model": completion.model,
            "index": choice.index,
            "finish_reason": choice.finish_reason,
            "usage": _serialize_usage(completion.usage),
        },
    )

    return chat_message

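# --- Illustrative sketch (not part of the original module) ---
def _example_tool_arguments_guard() -> None:
    """Hedged demo of the malformed-arguments guard used above, in isolation."""
    for arguments_str in ('{"city": "Berlin"}', "{not valid json"):
        try:
            print(json.loads(arguments_str))  # well-formed arguments parse into a dict
        except json.JSONDecodeError:
            print(f"skipped malformed tool-call arguments: {arguments_str!r}")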

def _convert_chat_completion_chunk_to_streaming_chunk(
    chunk: ChatCompletionChunk, previous_chunks: list[StreamingChunk], component_info: Optional[ComponentInfo] = None
) -> StreamingChunk:
    """
    Converts the streaming response chunk from the OpenAI API to a StreamingChunk.

    :param chunk: The chunk returned by the OpenAI API.
    :param previous_chunks: A list of previously received StreamingChunks.
    :param component_info: An optional `ComponentInfo` object containing information about the component that
        generated the chunk, such as the component name and type.

    :returns:
        A StreamingChunk object representing the content of the chunk from the OpenAI API.
    """
    finish_reason_mapping: dict[str, FinishReason] = {
        "stop": "stop",
        "length": "length",
        "content_filter": "content_filter",
        "tool_calls": "tool_calls",
        "function_call": "tool_calls",
    }
    # choices is empty on the final chunk when include_usage is set to True; that chunk carries
    # the usage information, so we return an empty-content StreamingChunk with usage metadata.
    if len(chunk.choices) == 0:
        return StreamingChunk(
            content="",
            component_info=component_info,
            # Index is None since it's only set to an int when a content block is present
            index=None,
            finish_reason=None,
            meta={
                "model": chunk.model,
                "received_at": datetime.now().isoformat(),
                "usage": _serialize_usage(chunk.usage),
            },
        )

    choice: ChunkChoice = chunk.choices[0]

    # create a list of ToolCallDelta objects from the tool calls
    if choice.delta.tool_calls:
        tool_calls_deltas = []
        for tool_call in choice.delta.tool_calls:
            function = tool_call.function
            tool_calls_deltas.append(
                ToolCallDelta(
                    index=tool_call.index,
                    id=tool_call.id,
                    tool_name=function.name if function else None,
                    arguments=function.arguments if function and function.arguments else None,
                )
            )
        chunk_message = StreamingChunk(
            content=choice.delta.content or "",
            component_info=component_info,
            # We adopt the first tool_calls_deltas.index as the overall index of the chunk.
            index=tool_calls_deltas[0].index,
            tool_calls=tool_calls_deltas,
            start=tool_calls_deltas[0].tool_name is not None,
            finish_reason=finish_reason_mapping.get(choice.finish_reason) if choice.finish_reason else None,
            meta={
                "model": chunk.model,
                "index": choice.index,
                "tool_calls": choice.delta.tool_calls,
                "finish_reason": choice.finish_reason,
                "received_at": datetime.now().isoformat(),
                "usage": _serialize_usage(chunk.usage),
            },
        )
        return chunk_message

    # On the very first chunk, the choice field only provides role info (e.g. "assistant"), so we set index to None.
    # We also set index to None for chunks missing the content field, which can happen if a chunk only contains a
    # finish reason.
    if choice.delta.content is None or choice.delta.role is not None:
        resolved_index = None
    else:
        # We set the index to 0: if text content is being streamed, then no tool calls are being streamed
        # NOTE: We may need to revisit this if OpenAI allows planning/thinking content before tool calls like
        #       Anthropic Claude
        resolved_index = 0
    chunk_message = StreamingChunk(
        content=choice.delta.content or "",
        component_info=component_info,
        index=resolved_index,
        # The first chunk is always a start message chunk that only contains role information, so if we reach here
        # and previous_chunks is length 1 then this is the start of text content.
        start=len(previous_chunks) == 1,
        finish_reason=finish_reason_mapping.get(choice.finish_reason) if choice.finish_reason else None,
        meta={
            "model": chunk.model,
            "index": choice.index,
            "tool_calls": choice.delta.tool_calls,
            "finish_reason": choice.finish_reason,
            "received_at": datetime.now().isoformat(),
            "usage": _serialize_usage(chunk.usage),
        },
    )
    return chunk_message

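# --- Illustrative sketch (not part of the original module) ---
def _example_chunk_conversion() -> None:
    """Hedged demo: convert a hand-built ChatCompletionChunk into a StreamingChunk.

    The field values are invented; the constructors are the standard openai-python
    types imported at the top of this file.
    """
    from openai.types.chat.chat_completion_chunk import ChoiceDelta

    chunk = ChatCompletionChunk(
        id="chunk-0",
        choices=[ChunkChoice(delta=ChoiceDelta(content="Hello"), finish_reason=None, index=0)],
        created=1726477000,
        model="gpt-4o-mini",
        object="chat.completion.chunk",
    )
    # previous_chunks already holds the role-only first chunk, so this one is treated
    # as the start of text content (start=True) with index 0.
    streaming_chunk = _convert_chat_completion_chunk_to_streaming_chunk(
        chunk=chunk, previous_chunks=[StreamingChunk(content="")]
    )
    print(streaming_chunk.content, streaming_chunk.start)  # Hello True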
def _serialize_usage(usage):
    """Convert an OpenAI usage object to a serializable dict, recursively."""
    if hasattr(usage, "model_dump"):
        return usage.model_dump()
    elif hasattr(usage, "__dict__"):
        return {k: _serialize_usage(v) for k, v in usage.__dict__.items() if not k.startswith("_")}
    elif isinstance(usage, dict):
        return {k: _serialize_usage(v) for k, v in usage.items()}
    elif isinstance(usage, list):
        return [_serialize_usage(item) for item in usage]
    else:
        return usage
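
# --- Illustrative sketch (not part of the original module) ---
def _example_serialize_usage() -> None:
    """Hedged demo: already-serializable dicts, lists, and scalars pass through unchanged."""
    usage = {"prompt_tokens": 15, "completion_tokens": 36, "details": [{"cached_tokens": 0}]}
    assert _serialize_usage(usage) == usage
    assert _serialize_usage(None) is None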