
scope3data / scope3ai-py, build 12680309189

08 Jan 2025 10:40PM UTC. Coverage: 94.312% (up 13.8 points from 80.557%).

Pull Request #57: feat(openai): add support for speech translation
(github, commit 5d8466, author tito)

67 of 73 new or added lines in 4 files covered (91.78%).
36 existing lines in 7 files now uncovered.
1741 of 1846 relevant lines covered (94.31%).
3.77 hits per line.
Source File: /scope3ai/tracers/huggingface/translation.py (93.44% covered)
from dataclasses import dataclass, asdict
from typing import Any, Callable, Optional

import tiktoken
from aiohttp import ClientResponse
from huggingface_hub import InferenceClient, AsyncInferenceClient  # type: ignore[import-untyped]
from huggingface_hub import TranslationOutput as _TranslationOutput
from requests import Response

from scope3ai.api.types import Scope3AIContext, Model, ImpactRow
from scope3ai.api.typesgen import Task
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI
from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
from scope3ai.response_interceptor.requests_interceptor import requests_response_capture

PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value


@dataclass
class TranslationOutput(_TranslationOutput):
    scope3ai: Optional[Scope3AIContext] = None

def huggingface_translation_wrapper_non_stream(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TranslationOutput:
    http_response: Response | None = None
    # Capture the underlying requests.Response so timing headers can be read;
    # http_response stays None if nothing was captured.
    with requests_response_capture() as responses:
        response = wrapped(*args, **kwargs)
        http_responses = responses.get()
        if len(http_responses) > 0:
            http_response = http_responses[-1]
    model = kwargs.get("model") or instance.get_recommended_model("translation")
    encoder = tiktoken.get_encoding("cl100k_base")
    if len(args) > 0:
        prompt = args[0]
    else:
        prompt = kwargs["text"]  # uncovered by tests
    # "x-compute-time" is reported in seconds; converted to ms below.
    compute_time = http_response.headers.get("x-compute-time")
    input_tokens = len(encoder.encode(prompt))
    output_tokens = len(encoder.encode(response.translation_text))
    scope3_row = ImpactRow(
        model=Model(id=model),
        task=Task.translation,
        input_tokens=input_tokens,
        output_tokens=output_tokens,  # TODO: how can we calculate the output tokens of a translation?
        request_duration_ms=float(compute_time) * 1000,
        managed_service_id=PROVIDER,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TranslationOutput(**asdict(response))
    result.scope3ai = scope3_ctx
    return result

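Both wrappers approximate token counts with tiktoken's cl100k_base encoding, an OpenAI tokenizer used here as a rough proxy rather than the Hugging Face model's own tokenizer. A minimal sketch of that estimation; the sample string is illustrative:

import tiktoken

encoder = tiktoken.get_encoding("cl100k_base")
tokens = encoder.encode("Hello, world!")
print(len(tokens))  # e.g. 4 tokens for this string under cl100k_base
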

async def huggingface_translation_wrapper_async_non_stream(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TranslationOutput:
    http_response: ClientResponse | None = None
    # Async variant: captures the underlying aiohttp ClientResponse instead.
    with aiohttp_response_capture() as responses:
        response = await wrapped(*args, **kwargs)
        http_responses = responses.get()
        if len(http_responses) > 0:
            http_response = http_responses[-1]
    model = kwargs.get("model") or instance.get_recommended_model("translation")
    encoder = tiktoken.get_encoding("cl100k_base")
    if len(args) > 0:
        prompt = args[0]
    else:
        prompt = kwargs["text"]  # uncovered by tests
    compute_time = http_response.headers.get("x-compute-time")
    input_tokens = len(encoder.encode(prompt))
    output_tokens = len(encoder.encode(response.translation_text))
    scope3_row = ImpactRow(
        model=Model(id=model),
        task=Task.translation,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        request_duration_ms=float(compute_time) * 1000,
        managed_service_id=PROVIDER,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TranslationOutput(**asdict(response))
    result.scope3ai = scope3_ctx
    return result

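Once scope3ai's instrumentation has installed these wrappers (the patching entry point is not part of this file), an ordinary huggingface_hub call returns the extended TranslationOutput with the impact context attached. A usage sketch under that assumption; the model id is a placeholder:

from huggingface_hub import InferenceClient

client = InferenceClient()
result = client.translation("Bonjour le monde", model="t5-small")  # placeholder model id
print(result.translation_text)
print(result.scope3ai)  # Scope3AIContext attached by the wrapper, or None
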

# Note: despite their names, these two wrappers delegate to the translation
# wrappers above.
async def huggingface_text_to_image_wrapper_async(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TranslationOutput:
    return await huggingface_translation_wrapper_async_non_stream(  # uncovered by tests
        wrapped, instance, args, kwargs
    )


def huggingface_text_to_image_wrapper(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TranslationOutput:
    return huggingface_translation_wrapper_non_stream(wrapped, instance, args, kwargs)  # uncovered by tests
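
The (wrapped, instance, args, kwargs) signature follows wrapt's function-wrapper convention, so registration presumably happens elsewhere in the package. A hypothetical sketch of how these wrappers could be attached; the patch targets and the use of wrapt here are assumptions, not scope3ai's actual instrumentation code:

import wrapt

from scope3ai.tracers.huggingface.translation import (
    huggingface_translation_wrapper_non_stream,
    huggingface_translation_wrapper_async_non_stream,
)

wrapt.wrap_function_wrapper(
    "huggingface_hub",
    "InferenceClient.translation",  # assumed patch target
    huggingface_translation_wrapper_non_stream,
)
wrapt.wrap_function_wrapper(
    "huggingface_hub",
    "AsyncInferenceClient.translation",  # assumed patch target
    huggingface_translation_wrapper_async_non_stream,
)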