• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lucasliet / llm-telegram-bot / 23618512895

26 Mar 2026 09:17PM UTC coverage: 53.739% (-0.4%) from 54.15%
23618512895

push

github

lucasliet
fix: resolve TypeScript 5.9 build errors for new Deno Deploy

Uint8Array<ArrayBufferLike> is no longer assignable to BlobPart in
TypeScript 5.9 (Deno 2.x). Wrap with new Uint8Array() to ensure
ArrayBuffer type compatibility in File/Blob constructors.

Also add deploy config to deno.json for new Deno Deploy infra.

183 of 377 branches covered (48.54%)

Branch coverage included in aggregate %.

2 of 3 new or added lines in 2 files covered. (66.67%)

65 existing lines in 1 file now uncovered.

2993 of 5533 relevant lines covered (54.09%)

17.94 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

23.36
/src/service/ContextCompressorService.ts
1
import OpenAi from 'openai';
2
import { estimateTokens, shouldCompress } from '@/util/TokenEstimator.ts';
21✔
3

4
/**
 * Minimal contract for a streaming text-generation backend.
 *
 * `generateText` resolves to a byte-stream reader over the model response and
 * an optional `responseMap` that transforms each decoded body chunk (e.g. to
 * strip transport framing) before it is accumulated by the caller.
 */
interface TextGenerationService {
	generateText(userKey: string, quote: string, prompt: string): Promise<{
		reader: ReadableStreamDefaultReader<Uint8Array>;
		responseMap?: (body: string) => string;
	}>;
}
10

11
/**
1✔
12
 * Service responsible for compressing conversation history
13
 * when it approaches the context window limit.
14
 */
21✔
15
export class ContextCompressorService {
21✔
16
        private static readonly COMPRESSION_PROMPT = `You are an expert context compressor. Your goal is to condense the following conversation history into a concise summary that preserves all critical information for an LLM to resume the conversation seamlessly.
21✔
17

18
Instructions:
19
1. Analyze the conversation history below.
20
2. Extract and preserve:
21
    - User preferences, personal details, and name (if mentioned).
22
    - Key decisions, agreed-upon plans, and established constraints.
23
    - Technical context: important code snippets (keep distinct), file paths, error messages, and configuration details.
24
    - Open questions, pending tasks, and unresolved issues.
25
3. Discard:
26
    - Phratic communication (greetings, small talk).
27
    - Redundant acknowledgments (e.g., "Okay", "I understand").
28
    - Resolved intermediate steps that are no longer relevant to the outcome.
29
4. Output the summary in Portuguese (pt-BR).
30
5. Use a structured format with bullet points.
31

32
---
33
Conversation History:
34
{HISTORY}
35
---
36
Summary (in Portuguese):`;
21✔
37

38
        /**
21✔
39
         * Compresses the entire conversation history into a single summary entry.
40
         * Works with any service that has generateText().
41
         */
×
42
        static async compressHistory(
×
43
                history: OpenAi.ChatCompletionMessageParam[],
×
44
                service: TextGenerationService,
×
45
                userKey: string,
×
46
        ): Promise<OpenAi.ChatCompletionMessageParam> {
×
UNCOV
47
                const historyText = this.formatHistory(history);
×
48
                const prompt = this.COMPRESSION_PROMPT.replace('{HISTORY}', historyText);
×
49

50
                const summary = await this.readStreamResponse(service, prompt, userKey);
×
51

52
                return {
×
53
                        role: 'assistant',
×
54
                        content: `[Resumo do contexto anterior]\n${summary}`,
×
UNCOV
55
                };
×
UNCOV
56
        }
×
57

58
        /**
21✔
59
         * Compresses the history forcefully without checking thresholds.
60
         * Returns statistics about the compression.
61
         */
×
62
        static async compressHistoryForce(
×
63
                history: OpenAi.ChatCompletionMessageParam[],
×
UNCOV
64
                service: TextGenerationService,
×
UNCOV
65
                userKey: string,
×
UNCOV
66
        ): Promise<{ history: OpenAi.ChatCompletionMessageParam[]; tokensBefore: number; tokensAfter: number }> {
×
UNCOV
67
                const tokensBefore = estimateTokens(history);
×
UNCOV
68
                const compressedEntry = await this.compressHistory(history, service, userKey);
×
UNCOV
69
                const compressedHistory = [compressedEntry];
×
UNCOV
70
                const tokensAfter = estimateTokens(compressedHistory);
×
71

UNCOV
72
                return {
×
UNCOV
73
                        history: compressedHistory,
×
UNCOV
74
                        tokensBefore,
×
UNCOV
75
                        tokensAfter,
×
UNCOV
76
                };
×
UNCOV
77
        }
×
78

79
        /**
21✔
80
         * Formats the history array into a readable text format for compression.
81
         */
×
82
        private static formatHistory(history: OpenAi.ChatCompletionMessageParam[]): string {
×
UNCOV
83
                return history
×
UNCOV
84
                        .map((msg) => `${msg.role}: ${typeof msg.content === 'string' ? msg.content : ''}`)
×
UNCOV
85
                        .join('\n\n');
×
UNCOV
86
        }
×
87

88
        /**
21✔
89
         * Reads a stream response and returns the full text.
UNCOV
90
         */
×
UNCOV
91
        private static async readStreamResponse(
×
UNCOV
92
                service: TextGenerationService,
×
UNCOV
93
                prompt: string,
×
UNCOV
94
                userKey: string,
×
UNCOV
95
        ): Promise<string> {
×
UNCOV
96
                const { reader, responseMap } = await service.generateText(userKey, '', prompt);
×
97

UNCOV
98
                let fullText = '';
×
UNCOV
99
                const decoder = new TextDecoder();
×
100

UNCOV
101
                while (true) {
×
UNCOV
102
                        const { done, value } = await reader.read();
×
UNCOV
103
                        if (done) break;
×
104

UNCOV
105
                        const chunk = decoder.decode(value);
×
UNCOV
106
                        if (responseMap) {
×
UNCOV
107
                                fullText += responseMap(chunk);
×
UNCOV
108
                        } else {
×
UNCOV
109
                                fullText += chunk;
×
UNCOV
110
                        }
×
UNCOV
111
                }
×
112

UNCOV
113
                return fullText || 'Não foi possível gerar resumo.';
×
UNCOV
114
        }
×
115

116
        /**
21✔
117
         * Checks if compression is needed and compresses if so.
118
         * Uses OpenAI client directly for backward compatibility with existing code.
119
         */
21✔
120
        static async compressIfNeeded(
21✔
121
                history: OpenAi.ChatCompletionMessageParam[],
21✔
122
                maxTokens: number,
21✔
123
                model: string,
21✔
124
                openai: OpenAi,
21✔
125
        ): Promise<{ history: OpenAi.ChatCompletionMessageParam[]; didCompress: boolean }> {
21✔
126
                const historyTokens = estimateTokens(history);
24✔
127
                if (!shouldCompress(historyTokens, maxTokens)) {
24✔
128
                        return { history, didCompress: false };
96✔
129
                }
24!
UNCOV
130
                console.log(`[ContextCompressorService] Context exceeds 80% limit (${historyTokens}/${maxTokens}), compressing...`);
×
131

UNCOV
132
                const compressedEntry = await this.compressWithOpenAI(history, model, openai);
×
UNCOV
133
                return { history: [compressedEntry], didCompress: true };
×
UNCOV
134
        }
×
135

136
        /**
21✔
137
         * Legacy method for backward compatibility with automatic compression.
UNCOV
138
         */
×
UNCOV
139
        private static async compressWithOpenAI(
×
UNCOV
140
                history: OpenAi.ChatCompletionMessageParam[],
×
UNCOV
141
                model: string,
×
UNCOV
142
                openai: OpenAi,
×
UNCOV
143
        ): Promise<OpenAi.ChatCompletionMessageParam> {
×
UNCOV
144
                const historyText = this.formatHistory(history);
×
UNCOV
145
                const prompt = this.COMPRESSION_PROMPT.replace('{HISTORY}', historyText);
×
146

UNCOV
147
                const response = await openai.chat.completions.create({
×
UNCOV
148
                        model,
×
UNCOV
149
                        messages: [{ role: 'user', content: prompt }],
×
UNCOV
150
                        max_tokens: 2000,
×
UNCOV
151
                        temperature: 0,
×
UNCOV
152
                });
×
153

UNCOV
154
                const summary = response.choices[0]?.message?.content || historyText.substring(0, 4000);
×
155

UNCOV
156
                return {
×
UNCOV
157
                        role: 'assistant',
×
UNCOV
158
                        content: `[Resumo do contexto anterior]\n${summary}`,
×
UNCOV
159
                };
×
UNCOV
160
        }
×
161
}
21✔
162

163
/** User-facing notice (pt-BR) indicating the conversation context was compressed to save space. */
export const COMPRESSION_WARNING_MSG = '⚠️ Contexto comprimido para economizar espaço.';
21✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc