
scope3data / scope3ai-py / build 12777439528

14 Jan 2025 10:21PM UTC coverage: 95.153% (+14.6%) from 80.557%

Pull Request #62: feat: Hugging face - speech to text async fix
Merge 20548d47d into 42c868843 (github / web-flow, 3071c5)

14 of 14 new or added lines in 1 file covered (100.0%)
33 existing lines in 9 files now uncovered
2022 of 2125 relevant lines covered (95.15%), 3.8 hits per line

Source File: /scope3ai/tracers/anthropic/chat.py (97.08% covered)

Lines uncovered in this build are flagged with a trailing "# uncovered" comment.
import time
from collections.abc import AsyncIterator, Awaitable, Iterator
from types import TracebackType
from typing import Any, Callable, Generic, Optional, TypeVar, Union

from anthropic import Anthropic, AsyncAnthropic
from anthropic import Stream as _Stream, AsyncStream as _AsyncStream
from anthropic._streaming import _T
from anthropic.lib.streaming import AsyncMessageStream as _AsyncMessageStream
from anthropic.lib.streaming import MessageStream as _MessageStream
from anthropic.types import Message as _Message
from anthropic.types.message_delta_event import MessageDeltaEvent
from anthropic.types.message_start_event import MessageStartEvent
from anthropic.types.message_stop_event import MessageStopEvent
from anthropic.types.raw_message_delta_event import RawMessageDeltaEvent
from anthropic.types.raw_message_start_event import RawMessageStartEvent
from anthropic.types.raw_message_stream_event import RawMessageStreamEvent
from typing_extensions import override

from scope3ai.api.types import Scope3AIContext, Model, ImpactRow
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI

PROVIDER = PROVIDERS.ANTROPIC.value

MessageStreamT = TypeVar("MessageStreamT", bound=_MessageStream)
AsyncMessageStreamT = TypeVar("AsyncMessageStreamT", bound=_AsyncMessageStream)


class Message(_Message):
    scope3ai: Optional[Scope3AIContext] = None


class MessageStream(_MessageStream):
    scope3ai: Optional[Scope3AIContext] = None

    @override
    def __stream_text__(self) -> Iterator[str]:  # type: ignore[misc]
        timer_start = time.perf_counter()
        output_tokens = 0
        input_tokens = 0
        model_name = None
        for chunk in self:
            if type(chunk) is MessageStartEvent:
                message = chunk.message
                model_name = message.model
                input_tokens += message.usage.input_tokens
                output_tokens += message.usage.output_tokens
            elif type(chunk) is MessageDeltaEvent:
                output_tokens += chunk.usage.output_tokens
            elif (
                chunk.type == "content_block_delta" and chunk.delta.type == "text_delta"
            ):
                yield chunk.delta.text
            elif type(chunk) is MessageStopEvent:
                input_tokens = message.usage.input_tokens  # uncovered
                output_tokens = message.usage.output_tokens  # uncovered

        requests_latency = time.perf_counter() - timer_start
        if model_name is not None:
            scope3_row = ImpactRow(
                model=Model(id=model_name),
                input_tokens=input_tokens,
                output_tokens=output_tokens,
                request_duration_ms=requests_latency * 1000,
                managed_service_id=PROVIDER,
            )
            self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)

    def __init__(self, parent) -> None:  # noqa: ANN001
        super().__init__(
            cast_to=parent._cast_to,  # noqa: SLF001
            response=parent.response,
            client=parent._client,  # noqa: SLF001
        )


class AsyncMessageStream(_AsyncMessageStream):
    scope3ai: Optional[Scope3AIContext] = None

    @override
    async def __stream_text__(self) -> AsyncIterator[str]:  # type: ignore[misc]
        timer_start = time.perf_counter()
        input_tokens = 0
        output_tokens = 0
        model_name = None
        async for chunk in self:
            if type(chunk) is MessageStartEvent:
                message = chunk.message
                model_name = message.model
                input_tokens += message.usage.input_tokens
                output_tokens += message.usage.output_tokens
            elif type(chunk) is MessageDeltaEvent:
                output_tokens += chunk.usage.output_tokens
            elif (
                chunk.type == "content_block_delta" and chunk.delta.type == "text_delta"
            ):
                yield chunk.delta.text
            elif type(chunk) is MessageStopEvent:
                input_tokens = message.usage.input_tokens  # uncovered
                output_tokens = message.usage.output_tokens
        requests_latency = time.perf_counter() - timer_start
        if model_name is not None:
            scope3_row = ImpactRow(
                model=Model(id=model_name),
                input_tokens=input_tokens,
                output_tokens=output_tokens,
                request_duration_ms=requests_latency * 1000,
                managed_service_id=PROVIDER,
            )
            self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)

    def __init__(self, parent) -> None:  # noqa: ANN001
        super().__init__(
            cast_to=parent._cast_to,  # noqa: SLF001
            response=parent.response,
            client=parent._client,  # noqa: SLF001
        )


class MessageStreamManager(Generic[MessageStreamT]):
    def __init__(self, api_request: Callable[[], MessageStream]) -> None:
        self.__api_request = api_request

    def __enter__(self) -> MessageStream:
        self.__stream = self.__api_request()
        self.__stream = MessageStream(self.__stream)
        return self.__stream

    def __exit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        if self.__stream is not None:
            self.__stream.close()


class AsyncMessageStreamManager(Generic[AsyncMessageStreamT]):
    def __init__(self, api_request: Awaitable[AsyncMessageStream]) -> None:
        self.__api_request = api_request

    async def __aenter__(self) -> AsyncMessageStream:
        self.__stream = await self.__api_request
        self.__stream = AsyncMessageStream(self.__stream)
        return self.__stream

    async def __aexit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        if self.__stream is not None:
            await self.__stream.close()
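The two managers mirror the SDK's own context managers, so instrumented streaming reads like stock Anthropic usage. Below is a minimal sketch of the call path they wrap, assuming scope3ai's instrumentation has already been installed; the client construction, model name, and prompt are illustrative and not taken from this file.

from anthropic import Anthropic

client = Anthropic()  # assumes ANTHROPIC_API_KEY is set in the environment

# With instrumentation installed, messages.stream() yields the patched
# MessageStreamManager above, whose __enter__ returns the MessageStream
# subclass that tallies tokens inside __stream_text__.
with client.messages.stream(
    model="claude-3-5-sonnet-20241022",  # illustrative model id
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello"}],
) as stream:
    for text in stream.text_stream:
        print(text, end="")

# After the stream is drained, the recorded impact context is attached.
print(stream.scope3ai)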

class Stream(_Stream[_T]):
    scope3ai: Optional[Scope3AIContext] = None

    def __stream__(self) -> Iterator[_T]:
        timer_start = time.perf_counter()
        model = None
        input_tokens = output_tokens = request_latency = 0
        for event in super().__stream__():
            yield event
            if type(event) is RawMessageStartEvent:
                model = event.message.model
                input_tokens = event.message.usage.input_tokens
            elif type(event) is RawMessageDeltaEvent:
                output_tokens = event.usage.output_tokens
                request_latency = time.perf_counter() - timer_start

        scope3_row = ImpactRow(
            model=Model(id=model),
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            request_duration_ms=request_latency * 1000,
            managed_service_id=PROVIDER,
        )
        self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)

    def __init__(self, parent) -> None:  # noqa: ANN001
        super().__init__(
            cast_to=parent._cast_to,  # noqa: SLF001
            response=parent.response,
            client=parent._client,  # noqa: SLF001
        )


class AsyncStream(_AsyncStream[_T]):
    scope3ai: Optional[Scope3AIContext] = None

    async def __stream__(self) -> AsyncIterator[_T]:
        timer_start = time.perf_counter()
        model = None
        input_tokens = output_tokens = request_latency = 0
        async for event in super().__stream__():
            yield event
            if type(event) is RawMessageStartEvent:
                model = event.message.model
                input_tokens = event.message.usage.input_tokens
            elif type(event) is RawMessageDeltaEvent:
                output_tokens = event.usage.output_tokens
                request_latency = time.perf_counter() - timer_start

        scope3_row = ImpactRow(
            model=Model(id=model),
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            request_duration_ms=request_latency * 1000,
            managed_service_id=PROVIDER,
        )
        self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)

    def __init__(self, parent) -> None:  # noqa: ANN001
        super().__init__(
            cast_to=parent._cast_to,  # noqa: SLF001
            response=parent.response,
            client=parent._client,  # noqa: SLF001
        )
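The Stream and AsyncStream subclasses above intercept the lower-level stream=True path, counting tokens as raw events pass through. A minimal sketch of how that path is exercised, again assuming the instrumentation is installed; the model id and prompt are illustrative.

from anthropic import Anthropic

client = Anthropic()

# anthropic_chat_wrapper re-wraps a stream=True response in the Stream
# subclass above before returning it to the caller.
stream = client.messages.create(
    model="claude-3-5-sonnet-20241022",  # illustrative model id
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for event in stream:
    pass  # each event is a RawMessageStreamEvent, consumed as usual

# Once the stream is exhausted, the computed impact row is attached.
print(stream.scope3ai)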

def _anthropic_chat_wrapper(response: Message, request_latency: float) -> Message:
    model_name = response.model
    scope3_row = ImpactRow(
        model=Model(id=model_name),
        input_tokens=response.usage.input_tokens,
        output_tokens=response.usage.output_tokens,
        request_duration_ms=request_latency * 1000,
        managed_service_id=PROVIDER,
    )
    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    if scope3ai_ctx is not None:
        return Message(**response.model_dump(), scope3ai=scope3ai_ctx)
    else:
        return response  # uncovered


def anthropic_chat_wrapper(
    wrapped: Callable,
    instance: Anthropic,
    args: Any,
    kwargs: Any,  # noqa: ARG001
) -> Union[Message, Stream[RawMessageStreamEvent]]:
    timer_start = time.perf_counter()
    response = wrapped(*args, **kwargs)
    request_latency = time.perf_counter() - timer_start

    is_stream = kwargs.get("stream", False)
    if is_stream:
        return Stream(response)
    return _anthropic_chat_wrapper(response, request_latency)


async def _anthropic_async_chat_wrapper(
    response: Message, request_latency: float
) -> Message:
    model_name = response.model
    scope3_row = ImpactRow(
        model=Model(id=model_name),
        input_tokens=response.usage.input_tokens,
        output_tokens=response.usage.output_tokens,
        request_duration_ms=request_latency * 1000,
        managed_service_id=PROVIDER,
    )
    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
    if scope3ai_ctx is not None:
        return Message(**response.model_dump(), scope3ai=scope3ai_ctx)
    else:
        return response  # uncovered


async def anthropic_async_chat_wrapper(
    wrapped: Callable,
    instance: AsyncAnthropic,
    args: Any,
    kwargs: Any,  # noqa: ARG001
) -> Union[Message, AsyncStream[RawMessageStreamEvent]]:
    timer_start = time.perf_counter()
    response = await wrapped(*args, **kwargs)
    request_latency = time.perf_counter() - timer_start

    is_stream = kwargs.get("stream", False)
    if is_stream:
        return AsyncStream(response)
    return await _anthropic_async_chat_wrapper(response, request_latency)


def anthropic_stream_chat_wrapper(
    wrapped: Callable,
    instance: Anthropic,
    args: Any,
    kwargs: Any,  # noqa: ARG001
) -> MessageStreamManager:
    response = wrapped(*args, **kwargs)
    return MessageStreamManager(response._MessageStreamManager__api_request)  # noqa: SLF001


def anthropic_async_stream_chat_wrapper(
    wrapped: Callable,
    instance: AsyncAnthropic,
    args: Any,
    kwargs: Any,  # noqa: ARG001
) -> AsyncMessageStreamManager:
    response = wrapped(*args, **kwargs)
    return AsyncMessageStreamManager(response._AsyncMessageStreamManager__api_request)  # noqa: SLF001
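All four public wrappers follow the (wrapped, instance, args, kwargs) calling convention used by the wrapt library, so they would typically be installed with wrapt.wrap_function_wrapper. A hypothetical hook-up sketch follows; the module and attribute paths are assumptions for illustration, not taken from this file.

import wrapt

# Hypothetical installation of the sync wrappers onto the Anthropic SDK.
# The target paths below are illustrative; scope3ai's real instrumentation
# may patch different locations.
wrapt.wrap_function_wrapper(
    "anthropic.resources.messages", "Messages.create", anthropic_chat_wrapper
)
wrapt.wrap_function_wrapper(
    "anthropic.resources.messages", "Messages.stream", anthropic_stream_chat_wrapper
)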