• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lucasliet / llm-telegram-bot / 21687984441

04 Feb 2026 08:53PM UTC coverage: 59.131% (-2.3%) from 61.43%
21687984441

push

github

web-flow
feat: agent tool call loop (#22)

# Agent Loop Implementation - Resumo

## 📦 Estrutura Criada

```
src/service/openai/
├── agent/
│   ├── AgentLoopConfig.ts      # Configurações e tipos
│   ├── AgentLoopExecutor.ts    # Orquestrador do loop
│   └── index.ts                # Exports
├── stream/
│   ├── StreamProcessor.ts      # Interface base
│   ├── ChatCompletionsStream.ts    # Processador Chat API
│   ├── ResponsesAPIStream.ts       # Processador Responses API
│   └── index.ts                # Exports
└── OpenAIService.ts            # Serviço refatorado
```

## 🎯 Mudança Fundamental

### Antes (comportamento original)
```typescript
if (tool_calls.length > 0) {
  // Executa tools UMA vez
  // Retorna resposta
}
```

### Depois (comportamento agente)
```typescript
while (tool_calls.length > 0) {
  // Executa tools
  // Acumula resultados no contexto
  // Faz nova chamada ao modelo
  // Repete até o modelo retornar apenas mensagem de texto
}
```

## 🔧 Componentes Principais

### 1. AgentLoopConfig
- `maxIterations`: Limite de iterações (padrão: 10)
- `maxContextTokens`: Limite de tokens no contexto (padrão: 100000)
- `toolExecutionTimeout`: Timeout por tool (padrão: 30000ms)
- Callbacks opcionais para observabilidade

### 2. StreamProcessor (interface)
- `processStream()`: Processa stream e extrai tool calls
- `formatToolResultsForNextCall()`: Formata resultados para próxima chamada

### 3. ChatCompletionsStreamProcessor
- Implementa `StreamProcessor` para Chat Completions API
- Acumula tool calls que vêm em chunks
- Formata como `ChatCompletionMessageParam[]`

### 4. ResponsesAPIStreamProcessor
- Implementa `StreamProcessor` para Responses API
- Processa eventos SSE (Server-Sent Events)
- Formata como `ResponseInputItem[]`

### 5. AgentLoopExecutor
- Orquestra o loop recursivo
- Executa tools em paralelo
- Gerencia estado e proteções
- Mantém streaming ativo para o usuário

## ✅ Compatibilidade

### Com Subclasses
- `GithubCopilotService`
- `Perplexit... (continued)

113 of 269 branches covered (42.01%)

Branch coverage included in aggregate %.

584 of 1242 new or added lines in 17 files covered. (47.02%)

7 existing lines in 4 files now uncovered.

2228 of 3690 relevant lines covered (60.38%)

15.18 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

56.63
/src/prototype/ContextExtensionPrototype.ts
1
import { Context } from 'grammy';
1✔
2
import { Audio, Message, ParseMode, PhotoSize, Voice } from 'grammy-types';
3
import { transcribeAudio } from '@/service/TelegramService.ts';
1✔
4

5
// Log prefix used whenever Telegram rejects a message sent with Markdown
// parse_mode; the offending text is logged after it and resent as plain text.
const MARKDOWN_ERROR_MESSAGE = 'Error on markdown parse_mode, message:';
1✔
6

7
declare module 'grammy' {
	interface Context {
		/**
		 * Reply to the incoming message, quoting it as the reply target.
		 */
		replyWithQuote(
			output: string,
			config?: { parse_mode: ParseMode },
		): Promise<Message.TextMessage>;

		/** Tell the user the current model cannot read photos. */
		replyWithVisionNotSupportedByModel(): Promise<Message.TextMessage>;

		/**
		 * Schedule a "still processing" notice; returns the timer id so the
		 * caller can cancel it when the real answer arrives.
		 */
		replyOnLongAnswer(): number;

		/**
		 * Send output, splitting it into multiple quoted messages when it
		 * exceeds Telegram's message length limit.
		 */
		replyInChunks(output: string): void;

		/**
		 * Stream a response to the user, periodically editing a placeholder
		 * message as chunks arrive from the reader.
		 */
		streamReply(
			reader: ReadableStreamDefaultReader<Uint8Array>,
			onComplete: (completedAnswer: string) => Promise<void>,
			responseMap?: (responseBody: string) => string,
			lastResult?: string,
		): Promise<void>;

		/**
		 * Extract the keys handlers commonly need (user id/key, text or
		 * transcribed audio, photos, caption, quoted text) from the update.
		 */
		extractContextKeys(): Promise<{
			userId: number;
			userKey: string;
			contextMessage?: string;
			audio?: Voice | Audio;
			photos?: PhotoSize[];
			caption?: string;
			quote?: string;
		}>;
	}
}
38

39
/**
1✔
40
 * Reply to a message with quoting the original message
41
 */
×
42
Context.prototype.replyWithQuote = function (
×
43
        this: Context,
44
        output: string,
×
45
        config?: { parse_mode: ParseMode },
×
46
) {
47
        return this.reply(output, {
×
48
                reply_to_message_id: this.message?.message_id,
×
49
                ...config,
×
50
        });
×
51
};
×
52

53
/**
1✔
54
 * Reply that the model doesn't support vision capabilities
55
 */
1✔
56
Context.prototype.replyWithVisionNotSupportedByModel = function (
1✔
57
        this: Context,
58
) {
59
        return this.replyWithQuote('esse modelo não suporta leitura de foto');
2✔
60
};
1✔
61

62
/**
1✔
63
 * Set a timeout to reply if the answer takes too long
64
 */
1✔
65
Context.prototype.replyOnLongAnswer = function (this: Context): number {
1✔
66
        return setTimeout(() => {
×
67
                console.info('Request is taking too long, sending processing message...');
×
68
                this.replyWithQuote(
×
69
                        'Estou processando sua solicitação, aguarde um momento...',
×
70
                );
71
        }, 12000);
×
72
};
1✔
73

74
/**
1✔
75
 * Split a large response into multiple message chunks
76
 */
1✔
77
Context.prototype.replyInChunks = function (
1✔
78
        this: Context,
79
        output: string,
1✔
80
): void {
81
        if (output.length > 4096) {
3✔
82
                const outputChunks = output.match(/[\s\S]{1,4093}/g)!;
4✔
83

84
                outputChunks.forEach((chunk, index) => {
4✔
85
                        const isLastChunk = index === outputChunks.length - 1;
6✔
86
                        const chunkOutput = `${chunk}${isLastChunk ? '' : '...'}`;
6✔
87

88
                        this.replyWithQuote(chunkOutput, { parse_mode: 'Markdown' })
12✔
89
                                .catch(() => {
×
90
                                        console.warn(MARKDOWN_ERROR_MESSAGE, chunkOutput);
×
91
                                        this.replyWithQuote(chunkOutput);
×
92
                                });
×
93
                });
4✔
94
                return;
4✔
95
        }
4✔
96

97
        this.replyWithQuote(output, { parse_mode: 'Markdown' })
12✔
98
                .catch(() => {
4✔
99
                        console.warn(MARKDOWN_ERROR_MESSAGE, output);
5✔
100
                        this.replyWithQuote(output);
5✔
101
                });
4✔
102
};
1✔
103

104
/**
 * Stream a response to the user with periodic updates.
 *
 * Sends a placeholder message, then reads the stream chunk by chunk,
 * appending to `result` and editing the placeholder at most every ~2s
 * (see editMessageWithCompletionEvery3Seconds). When the accumulated text
 * outgrows one Telegram message (4093 chars, leaving room for a '...'
 * suffix), the current segment is flushed and the function recurses with the
 * remainder as `lastResult`, creating a fresh placeholder for the next
 * segment. `onComplete` is invoked once per flushed segment with that
 * segment's text.
 */
Context.prototype.streamReply = async function (
	this: Context,
	reader: ReadableStreamDefaultReader<Uint8Array>,
	onComplete: (completedAnswer: string) => Promise<void>,
	responseMap?: (responseBody: string) => string,
	lastResult?: string,
): Promise<void> {
	// Placeholder the user sees immediately; edited in place as text arrives.
	const { message_id } = await this.replyWithQuote('processando...');
	let result = lastResult || '';
	let lastUpdate = Date.now();
	let lastSentMessage = '';

	while (true) {
		const { done, value } = await reader.read();
		if (done) break;

		const chunk = decodeStreamResponseText(value, responseMap);
		result += chunk;

		// 4093 = Telegram's 4096-char limit minus the 3-char '...' suffix.
		if (result.length > 4093) {
			// String prototype extensions declared elsewhere in the project;
			// presumably they strip <think> blocks and rewrite search sources.
			result = result.removeThinkingChatCompletion()
				.convertBlackBoxWebSearchSourcesToMarkdown();

			if (result.length > 4093) {
				// NOTE(review): `chunk` was already appended to `result` above,
				// so adding it again here looks like it could duplicate content
				// unless the sanitizers shortened the text — confirm intent.
				const remainingChunk = result.substring(4093) + chunk;
				result = result.substring(0, 4093);

				// Force-flush this full segment (isLastMessage = true).
				const updateResult = await editMessageWithCompletionEvery3Seconds(
					this,
					message_id,
					result,
					lastUpdate,
					lastSentMessage,
					true,
				);
				lastUpdate = updateResult.timestamp;
				lastSentMessage = updateResult.lastMessage;
				// Fire-and-forget: segment is persisted while streaming continues.
				onComplete(result);
				// Recurse to stream the remainder into a new message.
				return this.streamReply(
					reader,
					onComplete,
					responseMap,
					remainingChunk,
				);
			}
		}

		// Rate-limited incremental edit of the placeholder.
		const updateResult = await editMessageWithCompletionEvery3Seconds(
			this,
			message_id,
			result,
			lastUpdate,
			lastSentMessage,
		);
		lastUpdate = updateResult.timestamp;
		lastSentMessage = updateResult.lastMessage;
	}

	// Stream finished: sanitize once more before the final edit.
	let sanitizedResult = result.removeThinkingChatCompletion()
		.convertBlackBoxWebSearchSourcesToMarkdown();

	if (sanitizedResult.length > 4093) {
		// Overflow after sanitizing: truncate the edited message and send the
		// tail as separate chunked replies.
		const remainingChunk = sanitizedResult.substring(4093);
		sanitizedResult = sanitizedResult.substring(0, 4093) + '...';
		this.replyInChunks(remainingChunk);
	}

	// Final edit; fall back to plain text if Markdown parsing is rejected.
	this.api.editMessageText(this.chat!.id, message_id, sanitizedResult, {
		parse_mode: 'Markdown',
	})
		.catch(() => {
			console.warn(MARKDOWN_ERROR_MESSAGE, sanitizedResult);
			this.api.editMessageText(this.chat!.id, message_id, sanitizedResult);
		});

	// Persist the raw (unsanitized) accumulated text for this segment.
	onComplete(result);
};
1✔
184

185
/**
1✔
186
 * Extract common context keys from the message
187
 */
1✔
188
Context.prototype.extractContextKeys = async function (this: Context) {
1✔
189
        const userId = this.from?.id!;
2✔
190
        const userKey = `user:${userId}`;
2✔
191
        const audio = this.message?.voice || this.message?.audio;
2✔
192
        const contextMessage = await getTextMessage(userId, userKey, this, audio);
2✔
193
        const photos = this.message?.photo;
2✔
194
        const caption = this.message?.caption;
2✔
195
        const quote = this.message?.reply_to_message?.text;
×
196

197
        return { userId, userKey, contextMessage, audio, photos, caption, quote };
18✔
198
};
1✔
199

200
/**
1✔
201
 * Helper function to get text from a message, transcribing audio if needed
202
 */
1✔
203
function getTextMessage(
1✔
204
        userId: number,
1✔
205
        userKey: string,
1✔
206
        ctx: Context,
1✔
207
        audio?: Voice,
1✔
208
): Promise<string | undefined> {
209
        if (audio) {
×
210
                return transcribeAudio(userId, userKey, ctx, audio);
×
211
        }
×
212
        return Promise.resolve(ctx.message?.text);
2✔
213
}
2✔
214

215
/**
1✔
216
 * Decode text from the response stream
217
 */
1✔
218
function decodeStreamResponseText(
1✔
219
        responseMessage: Uint8Array,
1✔
220
        responseMap?: (responseBody: string) => string,
1✔
221
): string {
222
        const decoder = new TextDecoder();
3✔
223
        const decodedText = decoder.decode(responseMessage);
3✔
224
        return responseMap ? responseMap(decodedText) : decodedText;
×
225
}
3✔
226

227
/**
 * Edit a message with updated content, respecting rate limits.
 * Avoid hitting Telegram API rate limit https://core.telegram.org/bots/faq#broadcasting-to-users
 *
 * Skips the edit unless forced (`isLastMessage`) or at least 2s have passed
 * since the last successful edit, and never re-sends identical text.
 * NOTE(review): the name says "3 seconds" but the threshold is 2000ms —
 * consider reconciling name and constant (renaming would touch callers).
 *
 * Returns the state the caller must carry into the next call: the timestamp
 * of the last successful edit and the last text actually sent.
 */
async function editMessageWithCompletionEvery3Seconds(
	ctx: Context,
	messageId: number,
	message: string,
	lastUpdate: number,
	lastSentMessage: string,
	isLastMessage = false,
): Promise<{ timestamp: number; lastMessage: string }> {
	const now = Date.now();
	const has2SecondsPassed = now - lastUpdate >= 2000;
	// In-progress edits carry a '...' suffix; the final one does not.
	const displayMessage = message + (isLastMessage ? '' : '...');

	if ((isLastMessage || has2SecondsPassed) && displayMessage !== lastSentMessage) {
		try {
			await ctx.api.editMessageText(ctx.chat!.id, messageId, displayMessage, {
				parse_mode: 'Markdown',
			});
			return { timestamp: now, lastMessage: displayMessage };
		} catch (error) {
			// Telegram rejects edits with unchanged text; treat as a no-op.
			if (error instanceof Error && error.message.includes('message is not modified')) {
				return { timestamp: lastUpdate, lastMessage: lastSentMessage };
			}
			// Presumably a Markdown parse failure: retry without parse_mode.
			console.warn(MARKDOWN_ERROR_MESSAGE, displayMessage);
			try {
				await ctx.api.editMessageText(ctx.chat!.id, messageId, displayMessage);
				return { timestamp: now, lastMessage: displayMessage };
			} catch (fallbackError) {
				// Both attempts failed; keep the previous state so the caller
				// retries on the next chunk.
				console.error(`Failed to edit message ${messageId} in chat ${ctx.chat!.id}:`, fallbackError);
				return { timestamp: lastUpdate, lastMessage: lastSentMessage };
			}
		}
	}

	// Throttled or unchanged: report the unchanged state.
	return { timestamp: lastUpdate, lastMessage: lastSentMessage };
}
3✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc