• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lucasliet / llm-telegram-bot / 23172248446

17 Mar 2026 12:19AM UTC coverage: 54.15% (-0.4%) from 54.504%
23172248446

push

github

lucasliet
feat: add Groq provider and provider-builder skill

183 of 377 branches covered (48.54%)

Branch coverage included in aggregate %.

21 of 37 new or added lines in 5 files covered. (56.76%)

201 existing lines in 7 files now uncovered.

2988 of 5479 relevant lines covered (54.54%)

18.09 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

37.74
/src/service/ContextCompressorService.ts
1
import OpenAi from 'openai';
2
import { estimateTokens, shouldCompress } from '@/util/TokenEstimator.ts';
21✔
3

4
/**
1✔
5
 * Service responsible for compressing conversation history
6
 * when it approaches the context window limit.
7
 */
21✔
8
export class ContextCompressorService {
21✔
9
        /**
21✔
10
         * Compresses the entire conversation history into a single summary entry.
11
         * Uses an LLM to extract only essential information.
UNCOV
12
         */
×
13
        static async compressHistory(
×
14
                history: OpenAi.ChatCompletionMessageParam[],
×
15
                model: string,
×
16
                openai: OpenAi,
×
17
        ): Promise<OpenAi.ChatCompletionMessageParam> {
×
18
                const historyText = this.formatHistory(history);
×
19
                const prompt = `You are an expert context compressor. Your goal is to condense the following conversation history into a concise summary that preserves all critical information for an LLM to resume the conversation seamlessly.
×
20

21
Instructions:
22
1. Analyze the conversation history below.
23
2. Extract and preserve:
24
    - User preferences, personal details, and name (if mentioned).
25
    - Key decisions, agreed-upon plans, and established constraints.
26
    - Technical context: important code snippets (keep distinct), file paths, error messages, and configuration details.
27
    - Open questions, pending tasks, and unresolved issues.
28
3. Discard:
29
    - Phratic communication (greetings, small talk).
30
    - Redundant acknowledgments (e.g., "Okay", "I understand").
31
    - Resolved intermediate steps that are no longer relevant to the outcome.
32
4. Output the summary in Portuguese (pt-BR).
33
5. Use a structured format with bullet points.
34

35
---
36
Conversation History:
UNCOV
37
${historyText}
×
38
---
UNCOV
39
Summary (in Portuguese):`;
×
40

UNCOV
41
                const response = await openai.chat.completions.create({
×
42
                        model,
×
43
                        messages: [{ role: 'user', content: prompt }],
×
44
                        max_tokens: 2000,
×
45
                        temperature: 0,
×
46
                });
×
47

UNCOV
48
                const summary = response.choices[0]?.message?.content || historyText.substring(0, 4000);
×
49

UNCOV
50
                return {
×
51
                        role: 'assistant',
×
52
                        content: `[Resumo do contexto anterior]\n${summary}`,
×
53
                };
×
54
        }
×
55

56
        /**
21✔
57
         * Formats the history array into a readable text format for compression.
UNCOV
58
         */
×
UNCOV
59
        private static formatHistory(history: OpenAi.ChatCompletionMessageParam[]): string {
×
60
                return history
×
61
                        .map((msg) => `${msg.role}: ${typeof msg.content === 'string' ? msg.content : ''}`)
×
62
                        .join('\n\n');
×
63
        }
×
64

65
        /**
21✔
66
         * Checks if compression is needed and compresses if so.
67
         * @returns Object containing the (possibly compressed) history and a boolean indicating if compression occurred.
68
         */
21✔
69
        static async compressIfNeeded(
21✔
70
                history: OpenAi.ChatCompletionMessageParam[],
21✔
71
                maxTokens: number,
21✔
72
                model: string,
21✔
73
                openai: OpenAi,
21✔
74
        ): Promise<{ history: OpenAi.ChatCompletionMessageParam[]; didCompress: boolean }> {
21✔
75
                const historyTokens = estimateTokens(history);
24✔
76
                if (!shouldCompress(historyTokens, maxTokens)) {
24✔
77
                        return { history, didCompress: false };
96✔
78
                }
24!
UNCOV
79
                console.log(`[ContextCompressorService] Context exceeds 80% limit (${historyTokens}/${maxTokens}), compressing...`);
×
UNCOV
80
                const compressedEntry = await this.compressHistory(history, model, openai);
×
81
                return { history: [compressedEntry], didCompress: true };
×
82
        }
×
83
}
21✔
84

85
/** User-facing notice (pt-BR) emitted when the conversation context has been compressed. */
export const COMPRESSION_WARNING_MSG = '⚠️ Contexto comprimido para economizar espaço.';
21✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc