• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

scope3data / scope3ai-py / 12654253802

07 Jan 2025 03:30PM UTC coverage: 94.179% (+13.6%) from 80.557%
12654253802

Pull #51

github

ff7512
web-flow
Merge c02cd4794 into 05c19478b
Pull Request #51: docs: update README for openai support

1618 of 1718 relevant lines covered (94.18%)

3.76 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

96.67
/scope3ai/tracers/huggingface/text_to_image.py
1
from dataclasses import dataclass
4✔
2
from typing import Any, Callable, Optional
4✔
3

4
import tiktoken
4✔
5
from aiohttp import ClientResponse
4✔
6
from huggingface_hub import InferenceClient, AsyncInferenceClient  # type: ignore[import-untyped]
4✔
7
from huggingface_hub import TextToImageOutput as _TextToImageOutput
4✔
8
from requests import Response
4✔
9

10
from scope3ai.api.types import Scope3AIContext, Model, ImpactRow
4✔
11
from scope3ai.api.typesgen import Task
4✔
12
from scope3ai.lib import Scope3AI
4✔
13
from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
4✔
14
from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
4✔
15

16
# Identifier used as `managed_service_id` when submitting impact rows for
# Hugging Face Inference API calls.
PROVIDER = "huggingface_hub"
17

18

19
@dataclass
class TextToImageOutput(_TextToImageOutput):
    """Hugging Face `TextToImageOutput` extended with Scope3AI impact data.

    Returned by the text-to-image tracer wrappers so callers receive the
    original output plus the environmental-impact context.
    """

    # Impact context attached after the provider call completes; None until
    # `submit_impact` has been invoked for this request.
    scope3ai: Optional[Scope3AIContext] = None
22

23

24
def huggingface_text_to_image_wrapper_non_stream(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Trace a synchronous Hugging Face text-to-image call and record impact.

    Args:
        wrapped: The original `InferenceClient.text_to_image` callable.
        instance: The client the call was made on (used to resolve the model).
        args: Positional arguments forwarded to `wrapped` (prompt may be args[0]).
        kwargs: Keyword arguments forwarded to `wrapped`.

    Returns:
        TextToImageOutput: the provider image with `scope3ai` context attached.
    """
    http_response: Response | None = None
    with requests_response_capture() as responses:
        response = wrapped(*args, **kwargs)
        http_responses = responses.get()
        if len(http_responses) > 0:
            http_response = http_responses[-1]
    model = kwargs.get("model") or instance.get_recommended_model("text-to-image")
    # The HF API does not report token usage for text-to-image, so approximate
    # input tokens by encoding the prompt with a generic tokenizer.
    encoder = tiktoken.get_encoding("cl100k_base")
    if len(args) > 0:
        prompt = args[0]
    else:
        prompt = kwargs["prompt"]
    # BUGFIX: the interceptor may capture no HTTP response (http_response is
    # None) and the "x-compute-time" header may be missing; previously either
    # case crashed with AttributeError/TypeError. Fall back to no duration.
    compute_time = (
        http_response.headers.get("x-compute-time") if http_response else None
    )
    input_tokens = len(encoder.encode(prompt))
    width, height = response.size
    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.text_to_image,
        output_images=["{width}x{height}".format(width=width, height=height)],
        # None when timing is unavailable — presumed optional on ImpactRow;
        # NOTE(review): confirm the generated type accepts a null duration.
        request_duration_ms=float(compute_time) * 1000 if compute_time else None,
        managed_service_id=PROVIDER,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TextToImageOutput(response)
    result.scope3ai = scope3_ctx
    return result
55

56

57
async def huggingface_text_to_image_wrapper_async_non_stream(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Trace an async Hugging Face text-to-image call and record impact.

    Async counterpart of `huggingface_text_to_image_wrapper_non_stream`,
    capturing the underlying aiohttp response instead of a requests one.

    Args:
        wrapped: The original `AsyncInferenceClient.text_to_image` coroutine.
        instance: The client the call was made on (used to resolve the model).
        args: Positional arguments forwarded to `wrapped` (prompt may be args[0]).
        kwargs: Keyword arguments forwarded to `wrapped`.

    Returns:
        TextToImageOutput: the provider image with `scope3ai` context attached.
    """
    http_response: ClientResponse | None = None
    with aiohttp_response_capture() as responses:
        response = await wrapped(*args, **kwargs)
        http_responses = responses.get()
        if len(http_responses) > 0:
            http_response = http_responses[-1]
    model = kwargs.get("model") or instance.get_recommended_model("text-to-image")
    # The HF API does not report token usage for text-to-image, so approximate
    # input tokens by encoding the prompt with a generic tokenizer.
    encoder = tiktoken.get_encoding("cl100k_base")
    if len(args) > 0:
        prompt = args[0]
    else:
        prompt = kwargs["prompt"]
    # BUGFIX: the interceptor may capture no HTTP response (http_response is
    # None) and the "x-compute-time" header may be missing; previously either
    # case crashed with AttributeError/TypeError. Fall back to no duration.
    compute_time = (
        http_response.headers.get("x-compute-time") if http_response else None
    )
    input_tokens = len(encoder.encode(prompt))
    width, height = response.size
    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.text_to_image,
        output_images=["{width}x{height}".format(width=width, height=height)],
        # None when timing is unavailable — presumed optional on ImpactRow;
        # NOTE(review): confirm the generated type accepts a null duration.
        request_duration_ms=float(compute_time) * 1000 if compute_time else None,
        managed_service_id=PROVIDER,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = TextToImageOutput(response)
    result.scope3ai = scope3_ctx
    return result
88

89

90
def huggingface_text_to_image_wrapper(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Entry point for the sync text-to-image tracer.

    Text-to-image has no streaming variant here, so this simply delegates to
    the non-stream implementation.
    """
    return huggingface_text_to_image_wrapper_non_stream(wrapped, instance, args, kwargs)
94

95

96
async def huggingface_text_to_image_wrapper_async(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> TextToImageOutput:
    """Entry point for the async text-to-image tracer.

    Text-to-image has no streaming variant here, so this simply delegates to
    the async non-stream implementation.
    """
    return await huggingface_text_to_image_wrapper_async_non_stream(
        wrapped, instance, args, kwargs
    )
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc