• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

scope3data / scope3ai-py / 12593246423

03 Jan 2025 05:27AM UTC coverage: 92.904% (+12.3%) from 80.557%
12593246423

push

github

329c1c
kevdevg
fix: trying to wrap async headers

10 of 30 new or added lines in 3 files covered. (33.33%)

20 existing lines in 3 files now uncovered.

1414 of 1522 relevant lines covered (92.9%)

3.71 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

63.46
/scope3ai/tracers/huggingface/text_to_image.py
1
import tiktoken
4✔
2
from dataclasses import dataclass
4✔
3
from typing import Any, Callable, Optional
4✔
4

5
from huggingface_hub import InferenceClient, AsyncInferenceClient  # type: ignore[import-untyped]
4✔
6
from huggingface_hub import TextToImageOutput as _TextToImageOutput
4✔
7

8
from scope3ai.api.types import Scope3AIContext, Model, ImpactRow
4✔
9
from scope3ai.api.typesgen import Task
4✔
10
from scope3ai.lib import Scope3AI
4✔
11
from scope3ai.tracers.huggingface.utils import hf_raise_for_status_capture
4✔
12

13
# Identifier used to tag impact rows as originating from the Hugging Face Hub client.
PROVIDER = "huggingface_hub"
14

15

16
@dataclass
class TextToImageOutput(_TextToImageOutput):
    """Hugging Face ``TextToImageOutput`` extended with Scope3AI tracing data."""

    # Impact context attached after the request completes; None until the
    # wrapper submits the impact row.
    scope3ai: Optional[Scope3AIContext] = None
19

20

21
def huggingface_text_to_image_wrapper_non_stream(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Trace a synchronous ``text_to_image`` call and attach its Scope3AI impact.

    Args:
        wrapped: The original ``InferenceClient.text_to_image`` callable.
        instance: The client the call was made on (used for model fallback).
        args: Positional arguments forwarded to ``wrapped`` (prompt first, if given).
        kwargs: Keyword arguments forwarded to ``wrapped``.

    Returns:
        A ``TextToImageOutput`` wrapping the generated image, with ``scope3ai``
        set to the submitted impact context.
    """
    # Capture the raw HTTP response so we can read server-side timing headers.
    with hf_raise_for_status_capture() as capture_response:
        response = wrapped(*args, **kwargs)
        http_response = capture_response.get()
    # BUG FIX: the fallback previously asked for the recommended
    # "text-to-speech" model; this tracer handles text-to-image.
    model = kwargs.get("model") or instance.get_recommended_model("text-to-image")
    encoder = tiktoken.get_encoding("cl100k_base")
    # Prompt may arrive positionally or as a keyword argument.
    if len(args) > 0:
        prompt = args[0]
    else:
        prompt = kwargs["prompt"]
    # "x-compute-time" is the server-reported inference duration in seconds.
    compute_time = http_response.headers.get("x-compute-time")
    input_tokens = len(encoder.encode(prompt))
    width, height = response.size
    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.text_to_image,
        output_images=["{width}x{height}".format(width=width, height=height)],
        request_duration_ms=float(compute_time) * 1000,
        managed_service_id=PROVIDER,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TextToImageOutput(response)
    result.scope3ai = scope3_ctx
    return result
49

50

51
async def huggingface_text_to_image_wrapper_async_non_stream(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Trace an async ``text_to_image`` call and attach its Scope3AI impact.

    Args:
        wrapped: The original ``AsyncInferenceClient.text_to_image`` coroutine
            function.
        instance: The client the call was made on (used for model fallback).
        args: Positional arguments forwarded to ``wrapped`` (prompt first, if given).
        kwargs: Keyword arguments forwarded to ``wrapped``.

    Returns:
        A ``TextToImageOutput`` wrapping the generated image, with ``scope3ai``
        set to the submitted impact context.
    """
    # BUG FIX: the original awaited wrapped() once OUTSIDE the capture context
    # (so its headers were never captured) and then called it again UN-awaited
    # inside, rebinding `response` to a coroutine — `response.size` below would
    # fail. Await exactly once, inside the capture context.
    with hf_raise_for_status_capture() as capture_response:
        response = await wrapped(*args, **kwargs)
        http_response = capture_response.get()
    # BUG FIX: the fallback previously asked for the recommended
    # "text-to-speech" model; this tracer handles text-to-image.
    model = kwargs.get("model") or instance.get_recommended_model("text-to-image")
    encoder = tiktoken.get_encoding("cl100k_base")
    # Prompt may arrive positionally or as a keyword argument.
    if len(args) > 0:
        prompt = args[0]
    else:
        prompt = kwargs["prompt"]
    # "x-compute-time" is the server-reported inference duration in seconds.
    compute_time = http_response.headers.get("x-compute-time")
    input_tokens = len(encoder.encode(prompt))
    width, height = response.size
    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.text_to_image,
        output_images=["{width}x{height}".format(width=width, height=height)],
        request_duration_ms=float(compute_time) * 1000,
        managed_service_id=PROVIDER,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TextToImageOutput(response)
    result.scope3ai = scope3_ctx
    return result
80

81

82
def huggingface_text_to_image_wrapper(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Entry point for tracing synchronous text-to-image calls.

    ``text_to_image`` has no streaming variant, so this simply delegates to
    the non-stream handler.
    """
    return huggingface_text_to_image_wrapper_non_stream(
        wrapped, instance, args, kwargs
    )
86

87

88
async def huggingface_text_to_image_wrapper_async(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Entry point for tracing asynchronous text-to-image calls.

    ``text_to_image`` has no streaming variant, so this simply delegates to
    the async non-stream handler.
    """
    result = await huggingface_text_to_image_wrapper_async_non_stream(
        wrapped, instance, args, kwargs
    )
    return result
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc