scope3data / scope3ai-py / build 12753874046
13 Jan 2025 06:40PM UTC coverage: 95.076% (+14.5%) from 80.557%

Pull Request #61: feat(Hugging face): Vision methods - image classification / image segmentation / object detection
Commit 3a8d3f by kevdevg (via github): fix: vision pillow read bytes

179 of 189 new or added lines in 5 files covered. (94.71%)

34 existing lines in 9 files now uncovered.

2008 of 2112 relevant lines covered (95.08%)

3.8 hits per line

Source File: /scope3ai/tracers/huggingface/text_to_speech.py (98.15% covered)
import time
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union

import tiktoken
from aiohttp import ClientResponse
from huggingface_hub import InferenceClient, AsyncInferenceClient  # type: ignore[import-untyped]
from huggingface_hub import TextToSpeechOutput as _TextToSpeechOutput
from requests import Response

from scope3ai.api.types import Scope3AIContext, Model, ImpactRow
from scope3ai.api.typesgen import Task
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI
from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
from scope3ai.response_interceptor.requests_interceptor import requests_response_capture

PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
HUGGING_FACE_TEXT_TO_SPEECH_TASK = "text-to-speech"


@dataclass
class TextToSpeechOutput(_TextToSpeechOutput):
    scope3ai: Optional[Scope3AIContext] = None


def _hugging_face_text_to_speech_wrapper(
    timer_start: Any,
    model: Any,
    response: Any,
    http_response: Union[ClientResponse, Response],
    args: Any,
    kwargs: Any,
) -> TextToSpeechOutput:
    input_tokens = None
    if http_response:
        compute_time = http_response.headers.get("x-compute-time")
        input_tokens = http_response.headers.get("x-compute-characters")
    else:
        compute_time = time.perf_counter() - timer_start  # uncovered in this report
    if not input_tokens:
        encoder = tiktoken.get_encoding("cl100k_base")
        prompt = args[0] if len(args) > 0 else kwargs.get("text", "")
        input_tokens = len(encoder.encode(prompt)) if prompt != "" else 0

    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.text_to_speech,
        request_duration_ms=float(compute_time) * 1000,
        managed_service_id=PROVIDER,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TextToSpeechOutput(audio=response, sampling_rate=16000)
    result.scope3ai = scope3_ctx
    return result


def huggingface_text_to_speech_wrapper(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToSpeechOutput:
    timer_start = time.perf_counter()
    http_response: Response | None = None
    with requests_response_capture() as responses:
        response = wrapped(*args, **kwargs)
        http_responses = responses.get()
        if len(http_responses) > 0:
            http_response = http_responses[-1]
    model = kwargs.get("model") or instance.get_recommended_model(
        HUGGING_FACE_TEXT_TO_SPEECH_TASK
    )
    return _hugging_face_text_to_speech_wrapper(
        timer_start, model, response, http_response, args, kwargs
    )


async def huggingface_text_to_speech_wrapper_async(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TextToSpeechOutput:
    timer_start = time.perf_counter()
    http_response: ClientResponse | None = None
    with aiohttp_response_capture() as responses:
        response = await wrapped(*args, **kwargs)
        http_responses = responses.get()
        if len(http_responses) > 0:
            http_response = http_responses[-1]
    model = kwargs.get("model") or instance.get_recommended_model(
        HUGGING_FACE_TEXT_TO_SPEECH_TASK
    )
    return _hugging_face_text_to_speech_wrapper(
        timer_start, model, response, http_response, args, kwargs
    )
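
For orientation, a minimal usage sketch of what this tracer enables. It assumes the library's setup entry point (written here as Scope3AI.init(), a name not confirmed by this file) installs huggingface_text_to_speech_wrapper around InferenceClient.text_to_speech, so calls return the extended TextToSpeechOutput carrying a Scope3AIContext:

    # Illustrative sketch, not project documentation: Scope3AI.init() is an
    # assumed setup call that enables the Hugging Face tracer defined above.
    from huggingface_hub import InferenceClient
    from scope3ai.lib import Scope3AI

    Scope3AI.init()  # assumed: installs the instrumentation wrappers
    client = InferenceClient()
    audio_out = client.text_to_speech("text to synthesize")
    # The wrapper returns TextToSpeechOutput with an attached impact context.
    print(audio_out.scope3ai)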