
scope3data / scope3ai-py / 12681022279

08 Jan 2025 11:43PM UTC coverage: 95.11% (+14.6%) from 80.557%

Pull Request #56: feat(huggingface): add support for image-to-image/ text-to-speech
Commit da957e by kevdevg: "fix: add try catch to pillow images"
CI: github

155 of 169 new or added lines in 7 files covered (91.72%).

26 existing lines in 4 files are now uncovered.

1770 of 1861 relevant lines covered (95.11%).

3.8 hits per line.

Source File

/scope3ai/tracers/huggingface/image_to_image.py (90.0% covered)
import time
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union

import tiktoken
from PIL import Image
from aiohttp import ClientResponse
from huggingface_hub import ImageToImageOutput as _ImageToImageOutput
from huggingface_hub import InferenceClient, AsyncInferenceClient  # type: ignore[import-untyped]
from requests import Response

from scope3ai.api.types import Scope3AIContext, Model, ImpactRow
from scope3ai.api.typesgen import Task
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI
from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
from scope3ai.response_interceptor.requests_interceptor import requests_response_capture

PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value


@dataclass
class ImageToImageOutput(_ImageToImageOutput):
    # Hugging Face output extended with the Scope3AI impact context.
    scope3ai: Optional[Scope3AIContext] = None


def _hugging_face_image_to_image_wrapper(
    timer_start: Any,
    model: Any,
    response: Any,
    http_response: Union[ClientResponse, Response],
    args: Any,
    kwargs: Any,
) -> ImageToImageOutput:
    # Prefer the provider-reported metrics from the HTTP response headers.
    if http_response:
        compute_time = http_response.headers.get("x-compute-time")
        input_tokens = http_response.headers.get("x-compute-characters")
    else:
        # Fallback added in this PR (lines not yet covered): estimate the
        # duration from the local timer and the token count from the prompt.
        compute_time = time.perf_counter() - timer_start
        encoder = tiktoken.get_encoding("cl100k_base")
        prompt = args[1] if len(args) > 1 else kwargs.get("prompt", "")
        input_tokens = len(encoder.encode(prompt)) if prompt != "" else 0
    input_images = None
    try:
        input_image = Image.open(args[0] if len(args) > 0 else kwargs["image"])
        input_width, input_height = input_image.size
        input_images = [
            ("{width}x{height}".format(width=input_width, height=input_height))
        ]
    except Exception:  # added in this PR; not yet covered
        pass
    output_width, output_height = response.size
    scope3_row = ImpactRow(
        model=Model(id=model),
        input_tokens=input_tokens,
        task=Task.image_generation,
        request_duration_ms=float(compute_time) * 1000,
        managed_service_id=PROVIDER,
        output_images=[
            "{width}x{height}".format(width=output_width, height=output_height)
        ],
        input_images=input_images,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    result = ImageToImageOutput(response)
    result.scope3ai = scope3_ctx
    return result


def huggingface_image_to_image_wrapper(
    wrapped: Callable, instance: InferenceClient, args: Any, kwargs: Any
) -> ImageToImageOutput:
    timer_start = time.perf_counter()
    http_response: Response | None = None
    with requests_response_capture() as responses:
        response = wrapped(*args, **kwargs)
        http_responses = responses.get()
        if len(http_responses) > 0:
            http_response = http_responses[-1]
    model = kwargs.get("model") or instance.get_recommended_model("image-to-image")
    return _hugging_face_image_to_image_wrapper(
        timer_start, model, response, http_response, args, kwargs
    )


async def huggingface_image_to_image_wrapper_async(
    wrapped: Callable, instance: AsyncInferenceClient, args: Any, kwargs: Any
) -> ImageToImageOutput:
    timer_start = time.perf_counter()
    http_response: ClientResponse | None = None
    with aiohttp_response_capture() as responses:
        response = await wrapped(*args, **kwargs)
        http_responses = responses.get()
        if len(http_responses) > 0:
            http_response = http_responses[-1]
    model = kwargs.get("model") or instance.get_recommended_model("image-to-image")
    return _hugging_face_image_to_image_wrapper(
        timer_start, model, response, http_response, args, kwargs
    )
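
The wrapper functions above follow the (wrapped, instance, args, kwargs) calling convention used by the wrapt library, so a tracer would typically attach them to the Hugging Face client methods with wrapt.wrap_function_wrapper. The snippet below is a minimal sketch of that hookup, not the registration code actually used by scope3ai; the patched attribute paths, the example image path, and the prompt are assumptions.

# Minimal sketch: attaching the wrappers with wrapt (assumed hookup, not the
# actual scope3ai registration code).
import wrapt

from scope3ai.tracers.huggingface.image_to_image import (
    huggingface_image_to_image_wrapper,
    huggingface_image_to_image_wrapper_async,
)

# The attribute paths below are assumptions based on the public
# huggingface_hub API (sync and async inference clients).
wrapt.wrap_function_wrapper(
    "huggingface_hub",
    "InferenceClient.image_to_image",
    huggingface_image_to_image_wrapper,
)
wrapt.wrap_function_wrapper(
    "huggingface_hub",
    "AsyncInferenceClient.image_to_image",
    huggingface_image_to_image_wrapper_async,
)

# After patching, a call returns the ImageToImageOutput defined above,
# carrying the submitted impact context alongside the generated image.
from huggingface_hub import InferenceClient

client = InferenceClient()
result = client.image_to_image("input.png", prompt="a watercolor version")
print(result.scope3ai)  # Scope3AIContext for this request

With that hookup, existing code keeps calling the client as usual; the only visible change is the extra scope3ai attribute on the returned object.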