
deepset-ai / haystack, build 14645042953 (24 Apr 2025 03:06PM UTC)
Pull Request #9303: fix: make `HuggingFaceAPIChatGenerator` convert Tool Call `arguments` from string (merge fdc9cc510 into f97472329)
Coverage: 90.447% (-0.04% from 90.482%); 10860 of 12007 relevant lines covered, 0.9 hits per line

Source file (90.27% covered):
haystack/components/generators/chat/hugging_face_api.py

# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0

import json
from datetime import datetime
from typing import Any, AsyncIterable, Dict, Iterable, List, Optional, Union

from haystack import component, default_from_dict, default_to_dict, logging
from haystack.dataclasses import ChatMessage, StreamingChunk, ToolCall, select_streaming_callback
from haystack.dataclasses.streaming_chunk import StreamingCallbackT
from haystack.lazy_imports import LazyImport
from haystack.tools import (
    Tool,
    Toolset,
    _check_duplicate_tool_names,
    deserialize_tools_or_toolset_inplace,
    serialize_tools_or_toolset,
)
from haystack.utils import Secret, deserialize_callable, deserialize_secrets_inplace, serialize_callable
from haystack.utils.hf import HFGenerationAPIType, HFModelType, check_valid_model, convert_message_to_hf_format
from haystack.utils.url_validation import is_valid_http_url

logger = logging.getLogger(__name__)

with LazyImport(message="Run 'pip install \"huggingface_hub[inference]>=0.27.0\"'") as huggingface_hub_import:
    from huggingface_hub import (
        AsyncInferenceClient,
        ChatCompletionInputFunctionDefinition,
        ChatCompletionInputTool,
        ChatCompletionOutput,
        ChatCompletionStreamOutput,
        InferenceClient,
    )


@component
class HuggingFaceAPIChatGenerator:
    """
    Completes chats using Hugging Face APIs.

    HuggingFaceAPIChatGenerator uses the [ChatMessage](https://docs.haystack.deepset.ai/docs/chatmessage)
    format for input and output. Use it to generate text with Hugging Face APIs:
    - [Free Serverless Inference API](https://huggingface.co/inference-api)
    - [Paid Inference Endpoints](https://huggingface.co/inference-endpoints)
    - [Self-hosted Text Generation Inference](https://github.com/huggingface/text-generation-inference)

    ### Usage examples

    #### With the free serverless inference API

    ```python
    from haystack.components.generators.chat import HuggingFaceAPIChatGenerator
    from haystack.dataclasses import ChatMessage
    from haystack.utils import Secret
    from haystack.utils.hf import HFGenerationAPIType

    messages = [ChatMessage.from_system("\\nYou are a helpful, respectful and honest assistant"),
                ChatMessage.from_user("What's Natural Language Processing?")]

    # the api_type can be expressed using the HFGenerationAPIType enum or as a string
    api_type = HFGenerationAPIType.SERVERLESS_INFERENCE_API
    api_type = "serverless_inference_api" # this is equivalent to the above

    generator = HuggingFaceAPIChatGenerator(api_type=api_type,
                                            api_params={"model": "HuggingFaceH4/zephyr-7b-beta"},
                                            token=Secret.from_token("<your-api-key>"))

    result = generator.run(messages)
    print(result)
    ```

    #### With paid inference endpoints

    ```python
    from haystack.components.generators.chat import HuggingFaceAPIChatGenerator
    from haystack.dataclasses import ChatMessage
    from haystack.utils import Secret

    messages = [ChatMessage.from_system("\\nYou are a helpful, respectful and honest assistant"),
                ChatMessage.from_user("What's Natural Language Processing?")]

    generator = HuggingFaceAPIChatGenerator(api_type="inference_endpoints",
                                            api_params={"url": "<your-inference-endpoint-url>"},
                                            token=Secret.from_token("<your-api-key>"))

    result = generator.run(messages)
    print(result)
    ```

    #### With self-hosted text generation inference

    ```python
    from haystack.components.generators.chat import HuggingFaceAPIChatGenerator
    from haystack.dataclasses import ChatMessage

    messages = [ChatMessage.from_system("\\nYou are a helpful, respectful and honest assistant"),
                ChatMessage.from_user("What's Natural Language Processing?")]

    generator = HuggingFaceAPIChatGenerator(api_type="text_generation_inference",
                                            api_params={"url": "http://localhost:8080"})

    result = generator.run(messages)
    print(result)
    ```
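
    #### With tools

    A minimal sketch of tool calling. The model name is a placeholder (pick one that supports
    function calling), and the `weather` function and its JSON schema are illustrative only:

    ```python
    from haystack.components.generators.chat import HuggingFaceAPIChatGenerator
    from haystack.dataclasses import ChatMessage
    from haystack.tools import Tool
    from haystack.utils import Secret

    def weather(city: str) -> str:
        return f"The weather in {city} is sunny"

    weather_tool = Tool(
        name="weather",
        description="Get the current weather for a city",
        parameters={"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]},
        function=weather,
    )

    generator = HuggingFaceAPIChatGenerator(api_type="serverless_inference_api",
                                            api_params={"model": "<a-function-calling-model>"},
                                            token=Secret.from_token("<your-api-key>"),
                                            tools=[weather_tool])

    result = generator.run([ChatMessage.from_user("What's the weather in Berlin?")])
    print(result["replies"][0].tool_calls)
    ```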
    """

    def __init__(  # pylint: disable=too-many-positional-arguments
        self,
        api_type: Union[HFGenerationAPIType, str],
        api_params: Dict[str, str],
        token: Optional[Secret] = Secret.from_env_var(["HF_API_TOKEN", "HF_TOKEN"], strict=False),
        generation_kwargs: Optional[Dict[str, Any]] = None,
        stop_words: Optional[List[str]] = None,
        streaming_callback: Optional[StreamingCallbackT] = None,
        tools: Optional[Union[List[Tool], Toolset]] = None,
    ):
        """
        Initialize the HuggingFaceAPIChatGenerator instance.

        :param api_type:
            The type of Hugging Face API to use. Available types:
            - `text_generation_inference`: See [TGI](https://github.com/huggingface/text-generation-inference).
            - `inference_endpoints`: See [Inference Endpoints](https://huggingface.co/inference-endpoints).
            - `serverless_inference_api`: See [Serverless Inference API](https://huggingface.co/inference-api).
        :param api_params:
            A dictionary with the following keys:
            - `model`: Hugging Face model ID. Required when `api_type` is `SERVERLESS_INFERENCE_API`.
            - `url`: URL of the inference endpoint. Required when `api_type` is `INFERENCE_ENDPOINTS` or
            `TEXT_GENERATION_INFERENCE`.
        :param token:
            The Hugging Face token to use as HTTP bearer authorization.
            Check your HF token in your [account settings](https://huggingface.co/settings/tokens).
        :param generation_kwargs:
            A dictionary with keyword arguments to customize text generation.
            Some examples: `max_tokens`, `temperature`, `top_p`.
            For details, see [Hugging Face chat_completion documentation](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.chat_completion).
        :param stop_words:
            An optional list of strings representing the stop words.
        :param streaming_callback:
            An optional callable for handling streaming responses.
        :param tools:
            A list of tools or a Toolset for which the model can prepare calls.
            The chosen model should support tool/function calling, according to the model card.
            Support for tools in the Hugging Face API and TGI is not yet fully refined and you may experience
            unexpected behavior. This parameter can accept either a list of `Tool` objects or a `Toolset` instance.
        """

        huggingface_hub_import.check()

        if isinstance(api_type, str):
            api_type = HFGenerationAPIType.from_str(api_type)

        if api_type == HFGenerationAPIType.SERVERLESS_INFERENCE_API:
            model = api_params.get("model")
            if model is None:
                raise ValueError(
                    "To use the Serverless Inference API, you need to specify the `model` parameter in `api_params`."
                )
            check_valid_model(model, HFModelType.GENERATION, token)
            model_or_url = model
        elif api_type in [HFGenerationAPIType.INFERENCE_ENDPOINTS, HFGenerationAPIType.TEXT_GENERATION_INFERENCE]:
            url = api_params.get("url")
            if url is None:
                msg = (
                    "To use Text Generation Inference or Inference Endpoints, you need to specify the `url` parameter "
                    "in `api_params`."
                )
                raise ValueError(msg)
            if not is_valid_http_url(url):
                raise ValueError(f"Invalid URL: {url}")
            model_or_url = url
        else:
            msg = f"Unknown api_type {api_type}"
            raise ValueError(msg)

        if tools and streaming_callback is not None:
            raise ValueError("Using tools and streaming at the same time is not supported. Please choose one.")
        _check_duplicate_tool_names(list(tools or []))

        # handle generation kwargs setup
        generation_kwargs = generation_kwargs.copy() if generation_kwargs else {}
        generation_kwargs["stop"] = generation_kwargs.get("stop", [])
        generation_kwargs["stop"].extend(stop_words or [])
        generation_kwargs.setdefault("max_tokens", 512)

        self.api_type = api_type
        self.api_params = api_params
        self.token = token
        self.generation_kwargs = generation_kwargs
        self.streaming_callback = streaming_callback
        # both a sync and an async client are created, so that run() and run_async() can each be served
        self._client = InferenceClient(model_or_url, token=token.resolve_value() if token else None)
        self._async_client = AsyncInferenceClient(model_or_url, token=token.resolve_value() if token else None)
        self.tools = tools

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize this component to a dictionary.

        :returns:
            A dictionary containing the serialized component.
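
        A serialization round-trip sketch (assumes `generator` is an initialized instance of this
        component):

        ```python
        data = generator.to_dict()
        restored = HuggingFaceAPIChatGenerator.from_dict(data)
        ```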
        """
        callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None
        return default_to_dict(
            self,
            api_type=str(self.api_type),
            api_params=self.api_params,
            token=self.token.to_dict() if self.token else None,
            generation_kwargs=self.generation_kwargs,
            streaming_callback=callback_name,
            tools=serialize_tools_or_toolset(self.tools),
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "HuggingFaceAPIChatGenerator":
        """
        Deserialize this component from a dictionary.
        """
        deserialize_secrets_inplace(data["init_parameters"], keys=["token"])
        deserialize_tools_or_toolset_inplace(data["init_parameters"], key="tools")
        init_params = data.get("init_parameters", {})
        serialized_callback_handler = init_params.get("streaming_callback")
        if serialized_callback_handler:
            data["init_parameters"]["streaming_callback"] = deserialize_callable(serialized_callback_handler)
        return default_from_dict(cls, data)

    @component.output_types(replies=List[ChatMessage])
    def run(
        self,
        messages: List[ChatMessage],
        generation_kwargs: Optional[Dict[str, Any]] = None,
        tools: Optional[Union[List[Tool], Toolset]] = None,
        streaming_callback: Optional[StreamingCallbackT] = None,
    ):
        """
        Invoke the text generation inference based on the provided messages and generation parameters.

        :param messages:
            A list of ChatMessage objects representing the input messages.
        :param generation_kwargs:
            Additional keyword arguments for text generation.
        :param tools:
            A list of tools or a Toolset for which the model can prepare calls. If set, it will override
            the `tools` parameter set during component initialization. This parameter can accept either a
            list of `Tool` objects or a `Toolset` instance.
        :param streaming_callback:
            An optional callable for handling streaming responses. If set, it will override the `streaming_callback`
            parameter set during component initialization.
        :returns: A dictionary with the following keys:
            - `replies`: A list containing the generated responses as ChatMessage objects.
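
        A per-call override sketch (assumes `generator` and `messages` as in the class-level usage
        examples; the `temperature` value is illustrative):

        ```python
        result = generator.run(messages, generation_kwargs={"temperature": 0.9})
        print(result["replies"][0].text)
        ```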
        """

        # update generation kwargs by merging with the default ones
        generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}

        formatted_messages = [convert_message_to_hf_format(message) for message in messages]

        tools = tools or self.tools
        if tools and self.streaming_callback:
            raise ValueError("Using tools and streaming at the same time is not supported. Please choose one.")
        _check_duplicate_tool_names(list(tools or []))

        # validate and select the streaming callback
        streaming_callback = select_streaming_callback(
            self.streaming_callback, streaming_callback, requires_async=False
        )

        if streaming_callback:
            return self._run_streaming(formatted_messages, generation_kwargs, streaming_callback)

        hf_tools = None
        if tools:
            if isinstance(tools, Toolset):
                tools = list(tools)
            hf_tools = [
                ChatCompletionInputTool(
                    function=ChatCompletionInputFunctionDefinition(
                        name=tool.name, description=tool.description, arguments=tool.parameters
                    ),
                    type="function",
                )
                for tool in tools
            ]
        return self._run_non_streaming(formatted_messages, generation_kwargs, hf_tools)

    @component.output_types(replies=List[ChatMessage])
    async def run_async(
        self,
        messages: List[ChatMessage],
        generation_kwargs: Optional[Dict[str, Any]] = None,
        tools: Optional[Union[List[Tool], Toolset]] = None,
        streaming_callback: Optional[StreamingCallbackT] = None,
    ):
        """
        Asynchronously invokes the text generation inference based on the provided messages and generation parameters.

        This is the asynchronous version of the `run` method. It has the same parameters
        and return values but can be used with `await` in async code.

        :param messages:
            A list of ChatMessage objects representing the input messages.
        :param generation_kwargs:
            Additional keyword arguments for text generation.
        :param tools:
            A list of tools or a Toolset for which the model can prepare calls. If set, it will override the `tools`
            parameter set during component initialization. This parameter can accept either a list of `Tool` objects
            or a `Toolset` instance.
        :param streaming_callback:
            An optional callable for handling streaming responses. If set, it will override the `streaming_callback`
            parameter set during component initialization.
        :returns: A dictionary with the following keys:
            - `replies`: A list containing the generated responses as ChatMessage objects.
        """

        # update generation kwargs by merging with the default ones
        generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}

        formatted_messages = [convert_message_to_hf_format(message) for message in messages]

        tools = tools or self.tools
        if tools and self.streaming_callback:
            raise ValueError("Using tools and streaming at the same time is not supported. Please choose one.")
        _check_duplicate_tool_names(list(tools or []))

        # validate and select the streaming callback
        streaming_callback = select_streaming_callback(self.streaming_callback, streaming_callback, requires_async=True)

        if streaming_callback:
            return await self._run_streaming_async(formatted_messages, generation_kwargs, streaming_callback)

        hf_tools = None
        if tools:
            if isinstance(tools, Toolset):
                tools = list(tools)
            hf_tools = [
                ChatCompletionInputTool(
                    function=ChatCompletionInputFunctionDefinition(
                        name=tool.name, description=tool.description, arguments=tool.parameters
                    ),
                    type="function",
                )
                for tool in tools
            ]
        return await self._run_non_streaming_async(formatted_messages, generation_kwargs, hf_tools)

    def _run_streaming(
        self, messages: List[Dict[str, str]], generation_kwargs: Dict[str, Any], streaming_callback: StreamingCallbackT
    ):
        api_output: Iterable[ChatCompletionStreamOutput] = self._client.chat_completion(
            messages, stream=True, **generation_kwargs
        )

        generated_text = ""
        first_chunk_time = None

        for chunk in api_output:
            # n is unused, so the API always returns only one choice
            # the argument is probably allowed for compatibility with OpenAI
            # see https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.chat_completion.n
            choice = chunk.choices[0]

            text = choice.delta.content or ""
            generated_text += text

            finish_reason = choice.finish_reason

            meta: Dict[str, Any] = {}
            if finish_reason:
                meta["finish_reason"] = finish_reason

            if first_chunk_time is None:
                first_chunk_time = datetime.now().isoformat()

            stream_chunk = StreamingChunk(text, meta)
            streaming_callback(stream_chunk)

        meta.update(
            {
                "model": self._client.model,
                "finish_reason": finish_reason,
                "index": 0,
                "usage": {"prompt_tokens": 0, "completion_tokens": 0},  # not available in streaming
                "completion_start_time": first_chunk_time,
            }
        )

        message = ChatMessage.from_assistant(text=generated_text, meta=meta)

        return {"replies": [message]}

    def _run_non_streaming(
        self,
        messages: List[Dict[str, str]],
        generation_kwargs: Dict[str, Any],
        tools: Optional[List["ChatCompletionInputTool"]] = None,
    ) -> Dict[str, List[ChatMessage]]:
        api_chat_output: ChatCompletionOutput = self._client.chat_completion(
            messages=messages, tools=tools, **generation_kwargs
        )

        if len(api_chat_output.choices) == 0:
            return {"replies": []}

        # n is unused, so the API always returns only one choice
        # the argument is probably allowed for compatibility with OpenAI
        # see https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.chat_completion.n
        choice = api_chat_output.choices[0]

        text = choice.message.content
        tool_calls = []

        if hfapi_tool_calls := choice.message.tool_calls:
            for hfapi_tc in hfapi_tool_calls:
                arguments = hfapi_tc.function.arguments
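                # some providers return the tool-call arguments as a JSON-encoded string rather than
                # a dict; parse it here so the ToolCall below always carries a mapping (PR #9303)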
                if isinstance(arguments, str):
                    try:
                        arguments = json.loads(arguments)
                    except json.JSONDecodeError:
                        logger.warning(
                            "HuggingFace API returned a malformed JSON string for tool call arguments. "
                            "The raw string will be wrapped in a 'raw_arguments' field. "
                            "Tool name: %s, Arguments: %s",
                            hfapi_tc.function.name,
                            arguments,
                        )
                        arguments = {"raw_arguments": arguments}

                tool_call = ToolCall(tool_name=hfapi_tc.function.name, arguments=arguments, id=hfapi_tc.id)
                tool_calls.append(tool_call)

        meta: Dict[str, Any] = {
            "model": self._client.model,
            "finish_reason": choice.finish_reason,
            "index": choice.index,
        }

        usage = {"prompt_tokens": 0, "completion_tokens": 0}
        if api_chat_output.usage:
            usage = {
                "prompt_tokens": api_chat_output.usage.prompt_tokens,
                "completion_tokens": api_chat_output.usage.completion_tokens,
            }
        meta["usage"] = usage

        message = ChatMessage.from_assistant(text=text, tool_calls=tool_calls, meta=meta)
        return {"replies": [message]}

    async def _run_streaming_async(
        self, messages: List[Dict[str, str]], generation_kwargs: Dict[str, Any], streaming_callback: StreamingCallbackT
    ):
        api_output: AsyncIterable[ChatCompletionStreamOutput] = await self._async_client.chat_completion(
            messages, stream=True, **generation_kwargs
        )

        generated_text = ""
        first_chunk_time = None

        async for chunk in api_output:
            choice = chunk.choices[0]

            text = choice.delta.content or ""
            generated_text += text

            finish_reason = choice.finish_reason

            meta: Dict[str, Any] = {}
            if finish_reason:
                meta["finish_reason"] = finish_reason

            if first_chunk_time is None:
                first_chunk_time = datetime.now().isoformat()

            stream_chunk = StreamingChunk(text, meta)
            await streaming_callback(stream_chunk)  # type: ignore

        meta.update(
            {
                "model": self._async_client.model,
                "finish_reason": finish_reason,
                "index": 0,
                "usage": {"prompt_tokens": 0, "completion_tokens": 0},  # not available in streaming
                "completion_start_time": first_chunk_time,
            }
        )

        message = ChatMessage.from_assistant(text=generated_text, meta=meta)
        return {"replies": [message]}

    async def _run_non_streaming_async(
        self,
        messages: List[Dict[str, str]],
        generation_kwargs: Dict[str, Any],
        tools: Optional[List["ChatCompletionInputTool"]] = None,
    ) -> Dict[str, List[ChatMessage]]:
        api_chat_output: ChatCompletionOutput = await self._async_client.chat_completion(
            messages=messages, tools=tools, **generation_kwargs
        )

        if len(api_chat_output.choices) == 0:
            return {"replies": []}

        choice = api_chat_output.choices[0]

        text = choice.message.content
        tool_calls = []

        if hfapi_tool_calls := choice.message.tool_calls:
            for hfapi_tc in hfapi_tool_calls:
                arguments = hfapi_tc.function.arguments
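                # same conversion as in _run_non_streaming: parse JSON-string arguments
                # into a dict so the ToolCall always carries a mapping (PR #9303)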
                if isinstance(arguments, str):
                    try:
                        arguments = json.loads(arguments)
                    except json.JSONDecodeError:
                        logger.warning(
                            "HuggingFace API returned a malformed JSON string for tool call arguments. "
                            "The raw string will be wrapped in a 'raw_arguments' field. "
                            "Tool name: %s, Arguments: %s",
                            hfapi_tc.function.name,
                            arguments,
                        )
                        arguments = {"raw_arguments": arguments}

                tool_call = ToolCall(tool_name=hfapi_tc.function.name, arguments=arguments, id=hfapi_tc.id)
                tool_calls.append(tool_call)

        meta: Dict[str, Any] = {
            "model": self._async_client.model,
            "finish_reason": choice.finish_reason,
            "index": choice.index,
        }

        usage = {"prompt_tokens": 0, "completion_tokens": 0}
        if api_chat_output.usage:
            usage = {
                "prompt_tokens": api_chat_output.usage.prompt_tokens,
                "completion_tokens": api_chat_output.usage.completion_tokens,
            }
        meta["usage"] = usage

        message = ChatMessage.from_assistant(text=text, tool_calls=tool_calls, meta=meta)
        return {"replies": [message]}