
lucasliet / llm-telegram-bot / 17309262078

28 Aug 2025 10:19PM UTC coverage: 33.696% (+2.2%) from 31.514%

Build 17309262078 · push · github · lucasliet

refactor: services and improve code consistency

- Updated import statements for consistency across services.
- Refactored image generation URL construction in PollinationsService.
- Cleaned up formatting and spacing in TelegramService for better readability.
- Streamlined function definitions and error handling in ToolService.
- Enhanced GeminiService and GithubCopilotService for improved readability and consistency.
- Removed unnecessary comments and improved documentation in ChatConfigUtil.
- Added detailed instructions for applying patches and prompts in resources.

47 of 82 branches covered (57.32%)

Branch coverage included in aggregate %.

24 of 302 new or added lines in 17 files covered (7.95%).

9 existing lines in 6 files now uncovered.

1160 of 3500 relevant lines covered (33.14%)

1.54 hits per line
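
As a sanity check on the arithmetic behind these figures: each percentage is simply covered/total, and the 33.696% aggregate at the top folds the branch counts into the line counts, as noted above. A short TypeScript sketch of that arithmetic follows; the `pct` helper is illustrative only, not part of the repository.

// Illustrative sketch: recompute the report's percentages from the raw counts above.
const pct = (covered: number, total: number, digits = 2): string =>
	((covered / total) * 100).toFixed(digits) + '%';

console.log(pct(1160, 3500));              // "33.14%"  relevant line coverage
console.log(pct(47, 82));                  // "57.32%"  branch coverage
console.log(pct(24, 302));                 // "7.95%"   new or added lines covered
console.log(pct(1160 + 47, 3500 + 82, 3)); // "33.696%" aggregate with branches included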

Source File: /src/util/ChatConfigUtil.ts (34.62% covered)

import OpenAi from 'npm:openai';
import { ExpirableContent } from '@/repository/ChatRepository.ts';

/**
 * Interface for StreamReplyResponse
 */
export interface StreamReplyResponse {
	/** Reader for the streaming response */
	reader: ReadableStreamDefaultReader<Uint8Array>;

	/** Callback function to execute when the response is complete */
	onComplete: (completedAnswer: string) => Promise<void>;

	/** Optional function to map response body to a different format */
	responseMap?: (responseBody: string) => string;
}

/**
 * Message format for Responses API
 */
export type ResponsesMessage = {
	type: 'message';
	role: 'user' | 'assistant';
	content: Array<{ type: 'input_text' | 'output_text' | 'text'; text: string }>;
};

/**
 * Convert Gemini history format to OpenAI/GPT format
 *
 * @param history - History in Gemini format
 * @returns History in OpenAI format
 */
export function convertGeminiHistoryToGPT(
	history: ExpirableContent[],
): OpenAi.ChatCompletionMessageParam[] {
	return history.map((content) => {
		return {
			role: content.role === 'user' ? 'user' : 'assistant',
			content: content.parts.map((part) => part.text).join(' '),
		};
	});
}

/**
 * Maps an array of OpenAI ChatCompletionTool objects to an array of OpenAI.Responses.Tool objects.
 * This function is used to adapt tool schemas for the Responses API.
 * @param tools - An optional array of OpenAI ChatCompletionTool objects.
 * @returns An array of OpenAI.Responses.Tool objects.
 */
export function mapChatToolsToResponsesTools(
	tools?: OpenAi.Chat.Completions.ChatCompletionTool[],
): OpenAi.Responses.Tool[] {
	if (!tools || tools.length === 0) {
		return [];
	}

	return tools.map((t): OpenAi.Responses.Tool => {
		if (t.type === 'function' && t.function) {
			const params = t.function.parameters || {};
			const props = params.properties || {};
			const required = Object.keys(props);
			return {
				type: 'function',
				name: t.function.name,
				description: t.function.description ?? '',
				parameters: {
					type: 'object',
					additionalProperties: false,
					...params,
					properties: props,
					required,
				},
				strict: (t as any).strict ?? true,
			} as OpenAi.Responses.Tool;
		}
		throw new Error('Unsupported tool type');
	});
}

/**
 * Replace configuration variables in a Gemini prompt template
 *
 * @param chatName - Name of the chat service
 * @param model - Model name
 * @param maxTokens - Maximum tokens for generation
 * @returns Modified prompt with updated values
 */
export function getSystemPrompt(
	chatName: string,
	model: string,
	maxTokens: number,
): string {
	return systemPrompt(chatName, model, maxTokens);
}

const systemPrompt = (chatName: string, model: string, maxTokens: number) => `
	Você é ${chatName}, um modelo de linguagem de IA muito prestativo. Está usando o modelo ${model}
	e está hospedado em um bot do cliente de mensagens Telegram.
	Então tentará manter suas respostas curtas e diretas para obter melhores resultados
	com o máximo de ${maxTokens} tokens de saída,
	Pode usar à vontade as estilizações de texto e emojis para tornar a conversa mais agradável e natural.

	Deve sempre respeitar a linguagem de marcação Markdown, evitando abrir marcações sem fecha-las.

	Caso tenha buscado informações atualizadas na internet, indique suas fontes de informação.
    `;
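
All three exports are pure helpers, so they are easy to exercise in isolation. Below is a minimal usage sketch: the `@/util/ChatConfigUtil.ts` import path mirrors the `@/repository/...` alias used above but is an assumption, and the history object and tool schema are invented for illustration, not fixtures from the repository. Per the per-line markers in this report, the new `mapChatToolsToResponsesTools` lines are currently uncovered. For reference, the Portuguese system prompt tells the model it is hosted in a Telegram bot, should keep replies short within the `maxTokens` output budget, should respect Markdown (never leaving markup unclosed), and should cite sources when it has fetched up-to-date information from the internet.

// Minimal usage sketch; inputs below are illustrative assumptions, not repository fixtures.
import { ExpirableContent } from '@/repository/ChatRepository.ts';
import { convertGeminiHistoryToGPT, getSystemPrompt, mapChatToolsToResponsesTools } from '@/util/ChatConfigUtil.ts';

// Gemini-style history entries: role plus parts[{ text }] (shape inferred from the code above).
const history = [
	{ role: 'user', parts: [{ text: 'Olá!' }] },
	{ role: 'model', parts: [{ text: 'Oi! Como posso ajudar?' }] },
] as unknown as ExpirableContent[];

const gptHistory = convertGeminiHistoryToGPT(history);
// -> [{ role: 'user', content: 'Olá!' }, { role: 'assistant', content: 'Oi! Como posso ajudar?' }]

// A Chat Completions tool schema adapted for the Responses API.
const responsesTools = mapChatToolsToResponsesTools([
	{
		type: 'function',
		function: {
			name: 'get_weather',
			description: 'Look up the current weather for a city',
			parameters: { type: 'object', properties: { city: { type: 'string' } } },
		},
	},
]);
// -> [{ type: 'function', name: 'get_weather', parameters: { ..., required: ['city'] }, strict: true }]

console.log(getSystemPrompt('MeuBot', 'some-model', 1000));
console.log(gptHistory, responsesTools);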