• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

scope3data / scope3ai-py / 12600829613

03 Jan 2025 04:36PM UTC coverage: 94.023% (+13.5%) from 80.557%
12600829613

Pull #45

github

4f8f3e
tito
fix(openai): implement and factorize the code for async
Pull Request #45: feat(openai): support for text-to-speech

57 of 62 new or added lines in 2 files covered. (91.94%)

20 existing lines in 3 files now uncovered.

1463 of 1556 relevant lines covered (94.02%)

3.76 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

91.67
/scope3ai/tracers/openai/text_to_speech.py
1
import importlib
4✔
2
import io
4✔
3
import logging
4✔
4
import time
4✔
5
from typing import Any, Callable, Optional
4✔
6

7
import tiktoken
4✔
8
from openai.resources.audio.speech import AsyncSpeech, Speech, _legacy_response
4✔
9

10
from scope3ai.api.types import ImpactRow, Model, Scope3AIContext
4✔
11
from scope3ai.lib import Scope3AI
4✔
12

13

14
def _lazy_import(module_name: str, class_name: str):
4✔
15
    def _imported():
4✔
16
        module = importlib.import_module(module_name)
4✔
17
        return getattr(module, class_name)
4✔
18

19
    return _imported
4✔
20

21

22
# Provider identifier reported to the Scope3AI impact API.
PROVIDER = "openai"

# Maps OpenAI `response_format` values to lazily-imported mutagen classes
# used by `_get_audio_duration` to measure the generated audio payload.
# Lazy import keeps module load cheap and touches only the needed codec.
MUTAGEN_MAPPING = {
    "mp3": _lazy_import("mutagen.mp3", "MP3"),
    "aac": _lazy_import("mutagen.aac", "AAC"),
    "opus": _lazy_import("mutagen.oggopus", "OggOpus"),
    "flac": _lazy_import("mutagen.flac", "FLAC"),
    "wav": _lazy_import("mutagen.wave", "WAVE"),
}

logger = logging.getLogger(f"scope3ai.tracers.{__name__}")
32

33

34
class HttpxBinaryResponseContent(_legacy_response.HttpxBinaryResponseContent):
    """OpenAI binary speech response extended with a Scope3AI context.

    The tracer re-wraps the original response in this subclass so callers
    can read the submitted impact data via the ``scope3ai`` attribute.
    """

    # Set by `_openai_text_to_speech_submit` after the impact row has been
    # submitted; None until then.
    scope3ai: Optional[Scope3AIContext] = None
36

37

38
def _get_audio_duration(format: str, content: bytes) -> Optional[float]:
    """Best-effort estimate of the audio duration, in seconds.

    Parses *content* with the mutagen class registered for *format* in
    MUTAGEN_MAPPING. Returns None (and logs) when the format is unsupported
    or the payload cannot be parsed.
    """
    loader = MUTAGEN_MAPPING.get(format)
    if loader is None:
        logger.error(f"Unsupported audio format: {format}")
        return None

    try:
        parsed = loader()(io.BytesIO(content))
        duration = parsed.info.length
    except Exception:
        logger.exception("Failed to estimate audio duration")
        return None

    if format == "wav":
        # bug in mutagen, it returns high number for wav files
        duration = len(content) * 8 / parsed.info.bitrate

    return duration
56

57

58
def _openai_text_to_speech_submit(
    response: _legacy_response.HttpxBinaryResponseContent,
    request_latency: float,
    kwargs: Any,
) -> HttpxBinaryResponseContent:
    """Build and submit a Scope3AI impact row for a text-to-speech call.

    Args:
        response: Raw binary response from the OpenAI speech endpoint.
        request_latency: Measured request latency in milliseconds; used as
            a fallback when the provider does not report processing time.
        kwargs: Keyword arguments of the original ``create`` call.

    Returns:
        The response re-wrapped as `HttpxBinaryResponseContent` with its
        ``scope3ai`` attribute set to the submitted impact context.
    """
    # Try estimating the output audio duration. The OpenAI API defaults
    # `response_format` to "mp3" when the caller does not supply it, so
    # mirror that default instead of raising KeyError.
    response_format = kwargs.get("response_format", "mp3")
    duration = _get_audio_duration(response_format, response.content)

    # Prefer the provider-reported processing time (ms) when available.
    compute_time = response.response.headers.get("openai-processing-ms")
    if compute_time:
        request_latency = float(compute_time)

    model_requested = kwargs["model"]
    # The speech endpoint returns no usage block, so approximate input
    # tokens by encoding the request text. (The original also derived a
    # value from the content-length header, but it was dead code —
    # unconditionally overwritten here.)
    encoder = tiktoken.get_encoding("cl100k_base")
    input_tokens = len(encoder.encode(kwargs["input"]))

    scope3_row = ImpactRow(
        model=Model(id=model_requested),
        input_tokens=input_tokens,
        request_duration_ms=request_latency,
        provider=PROVIDER,
        audio_output_seconds=duration,
    )

    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)

    wrapped_response = HttpxBinaryResponseContent(
        response=response.response,
    )
    wrapped_response.scope3ai = scope3_ctx
    return wrapped_response
93

94

95
def openai_text_to_speech_wrapper(
    wrapped: Callable, instance: Speech, args: Any, kwargs: Any
) -> HttpxBinaryResponseContent:
    """Trace a synchronous speech request and submit its impact.

    Times the wrapped call (milliseconds) and hands the response off to
    `_openai_text_to_speech_submit`.
    """
    started_at = time.perf_counter()
    result = wrapped(*args, **kwargs)
    elapsed_ms = (time.perf_counter() - started_at) * 1000
    return _openai_text_to_speech_submit(result, elapsed_ms, kwargs)
102

103

104
async def openai_async_text_to_speech_wrapper(
    wrapped: Callable, instance: AsyncSpeech, args: Any, kwargs: Any
) -> HttpxBinaryResponseContent:
    """Trace an asynchronous speech request and submit its impact.

    Async counterpart of `openai_text_to_speech_wrapper`.
    """
    timer_start = time.perf_counter()
    response = await wrapped(*args, **kwargs)
    # Convert to milliseconds to match the sync wrapper and the
    # `request_duration_ms` field. (The original passed raw seconds here,
    # under-reporting async latency by a factor of 1000.)
    request_latency = (time.perf_counter() - timer_start) * 1000
    return _openai_text_to_speech_submit(response, request_latency, kwargs)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc