
scope3data / scope3ai-py / 12680309189

08 Jan 2025 10:40PM UTC coverage: 94.312% (+13.8%) from 80.557%

Pull Request #57: feat(openai): add support for speech translation
Commit 5d8466 by tito (via github)

67 of 73 new or added lines in 4 files covered (91.78%).

36 existing lines in 7 files are now uncovered.

1741 of 1846 relevant lines covered (94.31%).

3.77 hits per line.
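
To make the percentages explicit, a quick sanity check in plain Python (the counts are taken from the summary above):

relevant_total = 1846  # relevant lines in this build
relevant_hit = 1741    # relevant lines executed at least once
print(f"{relevant_hit / relevant_total:.2%}")  # 94.31%

new_total = 73  # new or added lines in Pull #57
new_hit = 67    # of those, lines covered by the test suite
print(f"{new_hit / new_total:.2%}")  # 91.78%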

Source File: /scope3ai/tracers/huggingface/text_to_speech.py (47.22% covered)
In the listing below, the module-level definitions are each hit 4 times; every line inside the two wrapper bodies is uncovered, so 17 of the file's 36 relevant lines are hit, which accounts for the 47.22% figure above.

import time
import tiktoken
from dataclasses import dataclass, asdict
from typing import Any, Callable, Optional, Union

from huggingface_hub import InferenceClient  # type: ignore[import-untyped]
from huggingface_hub import TextToSpeechOutput as _TextToSpeechOutput
from requests import Response

from scope3ai.api.types import Scope3AIContext, Model, ImpactRow
from scope3ai.api.typesgen import Task
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI

PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value


@dataclass
class TextToSpeechOutput(_TextToSpeechOutput):
    # Hugging Face's output type, extended with the Scope3AI impact context.
    scope3ai: Optional[Scope3AIContext] = None


def huggingface_text_to_speech_wrapper_non_stream(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToSpeechOutput:
    # Wall-clock the wrapped call as a fallback latency measurement.
    timer_start = time.perf_counter()
    response = wrapped(*args, **kwargs)
    request_latency = (time.perf_counter() - timer_start) * 1000
    model = kwargs.get("model") or instance.get_recommended_model("text-to-speech")
    # Approximate the prompt's token count with the cl100k_base encoding.
    encoder = tiktoken.get_encoding("cl100k_base")
    if len(args) > 0:
        prompt = args[0]
    else:
        prompt = kwargs["text"]
    # Prefer the provider-reported compute time header when it is present.
    http_response: Union[Response, None] = getattr(instance, "response")
    if http_response is not None:
        if http_response.headers.get("x-compute-time"):
            request_latency = float(http_response.headers.get("x-compute-time"))
    input_tokens = len(encoder.encode(prompt))
    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.text_to_speech,
        request_duration_ms=request_latency,
        managed_service_id=PROVIDER,
    )

    # Submit the impact row and attach the resulting context to the output.
    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TextToSpeechOutput(**asdict(response))
    result.scope3ai = scope3_ctx
    return result


def huggingface_text_to_speech_wrapper(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToSpeechOutput:
    # Text-to-speech has no streaming variant here; delegate directly.
    return huggingface_text_to_speech_wrapper_non_stream(
        wrapped, instance, args, kwargs
    )
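
Both wrappers use wrapt's (wrapped, instance, args, kwargs) calling convention, so they are meant to be patched over InferenceClient.text_to_speech rather than called directly. Below is a minimal sketch of how such a wrapper could be attached with wrapt. This is not the library's own setup code: the Scope3AI.init() call is an assumed entry point, and the wrapper also expects the tracer to have stashed the raw HTTP response on the client (the getattr(instance, "response") above), so full library initialisation is assumed.

import wrapt
from huggingface_hub import InferenceClient

from scope3ai.lib import Scope3AI
from scope3ai.tracers.huggingface.text_to_speech import (
    huggingface_text_to_speech_wrapper,
)

# Route every InferenceClient.text_to_speech call through the tracer wrapper.
wrapt.wrap_function_wrapper(
    "huggingface_hub",
    "InferenceClient.text_to_speech",
    huggingface_text_to_speech_wrapper,
)

Scope3AI.init()  # assumed initialisation entry point; the real API may differ

client = InferenceClient()
result = client.text_to_speech("Hello!")  # traced text-to-speech call
print(result.scope3ai)  # Scope3AIContext attached by the wrapper

In practice the library's initialisation would likely register these patches itself; the explicit wrap_function_wrapper call here is only to show where the wrapper's signature comes from.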