• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

scope3data / scope3ai-py / 12600822014

03 Jan 2025 04:35PM UTC coverage: 94.022% (+13.5%) from 80.557%
12600822014

Pull #47

github

0248fe
kevdevg
fix: text-to-image async implementation
Pull Request #47: Kevdevg/huggingface

45 of 49 new or added lines in 3 files covered. (91.84%)

20 existing lines in 3 files now uncovered.

1447 of 1539 relevant lines covered (94.02%)

3.76 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

96.08
/scope3ai/tracers/huggingface/text_to_image.py
1
import tiktoken
4✔
2
from dataclasses import dataclass
4✔
3
from typing import Any, Callable, Optional
4✔
4

5
from huggingface_hub import InferenceClient, AsyncInferenceClient  # type: ignore[import-untyped]
4✔
6
from huggingface_hub import TextToImageOutput as _TextToImageOutput
4✔
7

8
from scope3ai.api.types import Scope3AIContext, Model, ImpactRow
4✔
9
from scope3ai.api.typesgen import Task
4✔
10
from scope3ai.lib import Scope3AI
4✔
11
from scope3ai.tracers.huggingface.utils import (
4✔
12
    hf_raise_for_status_capture,
13
    hf_async_raise_for_status_capture,
14
)
15

16
PROVIDER = "huggingface_hub"
4✔
17

18

19
@dataclass
class TextToImageOutput(_TextToImageOutput):
    """Hugging Face ``TextToImageOutput`` extended with the Scope3AI
    impact-tracking context for the request that produced it."""

    # Set by the tracer wrappers after submit_impact(); None until then.
    scope3ai: Optional[Scope3AIContext] = None
22

23

24
def huggingface_text_to_image_wrapper_non_stream(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Trace a synchronous ``InferenceClient.text_to_image`` call and attach
    the Scope3AI impact context to the returned output.

    Args:
        wrapped: The original ``text_to_image`` method being wrapped.
        instance: The ``InferenceClient`` the method is bound to.
        args: Positional arguments forwarded to the wrapped call.
        kwargs: Keyword arguments forwarded to the wrapped call.

    Returns:
        A ``TextToImageOutput`` carrying the generated image plus the
        ``Scope3AIContext`` for the submitted impact row.
    """
    with hf_raise_for_status_capture() as capture_response:
        response = wrapped(*args, **kwargs)
        http_response = capture_response.get()
    # Bug fix: the fallback previously asked for the recommended
    # "text-to-speech" model; this tracer handles text-to-image.
    model = kwargs.get("model") or instance.get_recommended_model("text-to-image")
    # Approximate prompt token count with the cl100k_base encoding.
    encoder = tiktoken.get_encoding("cl100k_base")
    prompt = args[0] if args else kwargs["prompt"]
    # Server-side processing time in seconds, reported by the HF endpoint.
    compute_time = http_response.headers.get("x-compute-time")
    input_tokens = len(encoder.encode(prompt))
    width, height = response.size
    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.text_to_image,
        output_images=[f"{width}x{height}"],
        request_duration_ms=float(compute_time) * 1000,
        managed_service_id=PROVIDER,
    )
    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TextToImageOutput(response)
    result.scope3ai = scope3_ctx
    return result
52

53

54
async def huggingface_text_to_image_wrapper_async_non_stream(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Trace an async ``AsyncInferenceClient.text_to_image`` call and attach
    the Scope3AI impact context to the returned output.

    Args:
        wrapped: The original async ``text_to_image`` method being wrapped.
        instance: The ``AsyncInferenceClient`` the method is bound to.
        args: Positional arguments forwarded to the wrapped call.
        kwargs: Keyword arguments forwarded to the wrapped call.

    Returns:
        A ``TextToImageOutput`` carrying the generated image plus the
        ``Scope3AIContext`` for the submitted impact row.
    """
    with hf_async_raise_for_status_capture() as capture_response:
        response = await wrapped(*args, **kwargs)
        http_response = capture_response.get()
    # Bug fix: the fallback previously asked for the recommended
    # "text-to-speech" model; this tracer handles text-to-image.
    model = kwargs.get("model") or instance.get_recommended_model("text-to-image")
    # Approximate prompt token count with the cl100k_base encoding.
    encoder = tiktoken.get_encoding("cl100k_base")
    prompt = args[0] if args else kwargs["prompt"]
    # Server-side processing time in seconds, reported by the HF endpoint.
    compute_time = http_response.headers.get("x-compute-time")
    input_tokens = len(encoder.encode(prompt))
    width, height = response.size
    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.text_to_image,
        output_images=[f"{width}x{height}"],
        request_duration_ms=float(compute_time) * 1000,
        managed_service_id=PROVIDER,
    )
    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TextToImageOutput(response)
    result.scope3ai = scope3_ctx
    return result
82

83

84
def huggingface_text_to_image_wrapper(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Sync tracer entry point for ``text_to_image``; delegates directly
    to the non-streaming implementation."""
    return huggingface_text_to_image_wrapper_non_stream(wrapped, instance, args, kwargs)
88

89

90
async def huggingface_text_to_image_wrapper_async(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Async tracer entry point for ``text_to_image``; delegates directly
    to the async non-streaming implementation."""
    return await huggingface_text_to_image_wrapper_async_non_stream(
        wrapped, instance, args, kwargs
    )
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc