• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

VolvoxLLC / volvox-bot / 23501660927

24 Mar 2026 04:51PM UTC coverage: 90.174% (+0.3%) from 89.826%
23501660927

push

github

BillChirico
chore: sync lockfiles after dependabot bumps (diff, next, react-hook-form, lucide-react, @sentry/node, @anthropic-ai/claude-code)

6326 of 7409 branches covered (85.38%)

Branch coverage included in aggregate %.

10753 of 11531 relevant lines covered (93.25%)

226.08 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

79.7
/src/modules/aiAutoMod.js
1
/**
2
 * AI Auto-Moderation Module
3
 * Uses Claude SDK to analyze messages for toxicity, spam, and harassment.
4
 * Supports configurable thresholds, per-guild settings, and multiple actions:
5
 * warn, timeout, kick, ban, or flag for review.
6
 */
7

8
import Anthropic from '@anthropic-ai/sdk';
9
import { EmbedBuilder } from 'discord.js';
10
import { info, error as logError, warn } from '../logger.js';
11
import { fetchChannelCached } from '../utils/discordCache.js';
12
import { isExempt } from '../utils/modExempt.js';
13
import { safeSend } from '../utils/safeSend.js';
14
import { createCase } from './moderation.js';
15

16
/** Default config when none is provided */
const DEFAULTS = {
  // Master switch; AI auto-mod is opt-in per guild.
  enabled: false,
  // Claude model used for message analysis.
  model: 'claude-haiku-4-5',
  // Per-category score thresholds (0.0–1.0); a score at or above the
  // threshold triggers the category.
  thresholds: {
    toxicity: 0.7,
    spam: 0.8,
    harassment: 0.7,
  },
  // Action taken per triggered category; executeAction handles
  // 'warn' | 'timeout' | 'kick' | 'ban' | 'delete' | 'flag' (flag falls
  // through to the review embed only).
  actions: {
    toxicity: 'flag',
    spam: 'delete',
    harassment: 'warn',
  },
  // Duration for the 'timeout' action: 5 minutes in milliseconds.
  timeoutDurationMs: 5 * 60 * 1000,
  // Channel that receives flag embeds; null disables flag reporting.
  flagChannelId: null,
  // When true, the offending message is deleted regardless of the action.
  autoDelete: true,
  // Role IDs exempt from AI auto-mod checks (in addition to isExempt()).
  exemptRoleIds: [],
};

/** Anthropic client (lazy initialized) */
let _client = null;
8✔
38

39
/**
 * Lazily construct and return the shared Anthropic client.
 * The instance is cached module-wide; resetClient() clears it.
 * @returns {Anthropic}
 */
function getClient() {
  _client ??= new Anthropic();
  return _client;
}
49

50
/**
 * Reset the cached Anthropic client so the next getClient() call builds a
 * fresh instance (intended for tests that swap credentials or mocks).
 */
export function resetClient() {
  _client = null;
}
56

57
/**
 * Build the effective AI auto-mod config for a guild by layering the
 * guild's `aiAutoMod` settings over DEFAULTS. The nested `thresholds`
 * and `actions` maps are merged key-by-key rather than replaced wholesale.
 *
 * @param {Object} config - Guild config (may be null/undefined)
 * @returns {Object} Merged AI auto-mod config
 */
export function getAiAutoModConfig(config) {
  const overrides = config?.aiAutoMod ?? {};
  const merged = { ...DEFAULTS, ...overrides };
  merged.thresholds = { ...DEFAULTS.thresholds, ...overrides.thresholds };
  merged.actions = { ...DEFAULTS.actions, ...overrides.actions };
  return merged;
}
71

72
/**
 * Analyze a message using Claude AI.
 * Returns scores and recommendations for moderation actions.
 *
 * @param {string} content - Message content to analyze
 * @param {Object} autoModConfig - AI auto-mod config; may be partial —
 *   missing fields (model, thresholds, actions) fall back to DEFAULTS
 * @returns {Promise<{flagged: boolean, scores: Object, categories: string[], reason: string, action: string}>}
 */
export async function analyzeMessage(content, autoModConfig) {
  const mergedConfig = autoModConfig ?? DEFAULTS;
  // Merge the nested maps with DEFAULTS up front. Without this, a partial
  // config missing `actions` throws a TypeError at the action lookup below,
  // and a missing `thresholds` key makes `score >= undefined` silently
  // false, disabling that category.
  const thresholds = { ...DEFAULTS.thresholds, ...mergedConfig.thresholds };
  const configuredActions = { ...DEFAULTS.actions, ...mergedConfig.actions };

  // Trivially short messages are never worth an API call.
  if (!content || content.trim().length < 3) {
    return {
      flagged: false,
      scores: { toxicity: 0, spam: 0, harassment: 0 },
      categories: [],
      reason: 'Message too short',
      action: 'none',
    };
  }

  const client = getClient();

  const prompt = `You are a content moderation assistant. Analyze the following Discord message and rate it on three dimensions.

Message to analyze:
<message>
${content.slice(0, 2000)}
</message>

Rate the message on a scale of 0.0 to 1.0 for each category:
- toxicity: Hateful language, slurs, extreme negativity targeting groups or individuals
- spam: Repetitive content, advertisements, scam links, flooding
- harassment: Targeted attacks on specific individuals, threats, bullying, doxxing

Respond ONLY with valid JSON in this exact format:
{
  "toxicity": 0.0,
  "spam": 0.0,
  "harassment": 0.0,
  "reason": "brief explanation of main concern or 'clean' if none"
}`;

  const response = await client.messages.create({
    model: mergedConfig.model ?? DEFAULTS.model,
    max_tokens: 256,
    messages: [{ role: 'user', content: prompt }],
  });

  const text = response.content[0]?.text ?? '{}';

  let parsed;
  try {
    // Claude is instructed to emit bare JSON, but tolerate surrounding prose
    // by extracting the outermost brace-delimited span.
    const jsonMatch = text.match(/\{[\s\S]*\}/);
    parsed = jsonMatch ? JSON.parse(jsonMatch[0]) : {};
  } catch {
    logError('AI auto-mod: failed to parse Claude response', { text });
    return {
      flagged: false,
      scores: { toxicity: 0, spam: 0, harassment: 0 },
      categories: [],
      reason: 'Parse error',
      action: 'none',
    };
  }

  // Clamp model output to [0, 1]; NaN / non-numeric values become 0.
  const scores = {
    toxicity: Math.min(1, Math.max(0, Number(parsed.toxicity) || 0)),
    spam: Math.min(1, Math.max(0, Number(parsed.spam) || 0)),
    harassment: Math.min(1, Math.max(0, Number(parsed.harassment) || 0)),
  };

  const triggeredCategories = [];

  if (scores.toxicity >= thresholds.toxicity) triggeredCategories.push('toxicity');
  if (scores.spam >= thresholds.spam) triggeredCategories.push('spam');
  if (scores.harassment >= thresholds.harassment) triggeredCategories.push('harassment');

  const flagged = triggeredCategories.length > 0;

  // Pick the most severe configured action among the triggered categories.
  const actionPriority = { ban: 5, kick: 4, timeout: 3, warn: 2, delete: 2, flag: 1, none: -1 };
  let action = 'none';
  for (const categoryName of triggeredCategories) {
    const categoryAction = configuredActions[categoryName] ?? 'flag';
    if ((actionPriority[categoryAction] ?? 0) > (actionPriority[action] ?? -1)) {
      action = categoryAction;
    }
  }

  return {
    flagged,
    scores,
    categories: triggeredCategories,
    reason: parsed.reason ?? 'No reason provided',
    action,
  };
}
170

171
/**
 * Post a review embed for a flagged message to the configured flag channel.
 * Silently does nothing when no flag channel is configured or it cannot be
 * fetched.
 *
 * @param {import('discord.js').Message} message - The flagged Discord message
 * @param {import('discord.js').Client} client - Discord client
 * @param {Object} result - Analysis result from analyzeMessage
 * @param {Object} autoModConfig - AI auto-mod config
 */
async function sendFlagEmbed(message, client, result, autoModConfig) {
  const channelId = autoModConfig.flagChannelId;
  if (!channelId) return;

  const reviewChannel = await fetchChannelCached(client, channelId).catch(() => null);
  if (!reviewChannel) return;

  // Render a 10-segment bar plus percentage, e.g. "███░░░░░░░ 30%".
  const renderBar = (value) => {
    const blocks = Math.round(value * 10);
    return `${'█'.repeat(blocks)}${'░'.repeat(10 - blocks)} ${Math.round(value * 100)}%`;
  };

  const scoreLines = [
    `Toxicity:   ${renderBar(result.scores.toxicity)}`,
    `Spam:       ${renderBar(result.scores.spam)}`,
    `Harassment: ${renderBar(result.scores.harassment)}`,
  ];

  const embed = new EmbedBuilder()
    .setColor(0xff6b6b)
    .setTitle('🤖 AI Auto-Mod Flag')
    .setDescription(`**Message flagged for review**\nAction taken: \`${result.action}\``)
    .addFields(
      { name: 'Author', value: `<@${message.author.id}> (${message.author.tag})`, inline: true },
      { name: 'Channel', value: `<#${message.channel.id}>`, inline: true },
      { name: 'Categories', value: result.categories.join(', ') || 'none', inline: true },
      { name: 'Message', value: (message.content || '*[no text]*').slice(0, 1024) },
      { name: 'AI Scores', value: scoreLines.join('\n') },
      { name: 'Reason', value: result.reason.slice(0, 512) },
      { name: 'Jump Link', value: `[View Message](${message.url})` },
    )
    .setFooter({ text: `Message ID: ${message.id}` })
    .setTimestamp();

  await safeSend(reviewChannel, { embeds: [embed] });
}
216

217
/**
 * Record a moderation case for an AI auto-mod action. Persistence failures
 * are logged (with the original per-action message format) but never
 * rethrown, so a storage error cannot abort the moderation flow.
 *
 * @param {string} guildId - Guild the case belongs to
 * @param {string} action - Case action name ('warn' | 'timeout' | 'kick' | 'ban')
 * @param {import('discord.js').GuildMember} member - Offending member
 * @param {string} botId - Moderator (bot) user ID
 * @param {string} botTag - Moderator (bot) tag
 * @param {string} reason - Case reason
 * @param {string} [duration] - Optional duration string (timeout only)
 */
async function recordCase(guildId, action, member, botId, botTag, reason, duration) {
  const payload = {
    action,
    targetId: member.user.id,
    targetTag: member.user.tag,
    moderatorId: botId,
    moderatorTag: botTag,
    reason,
  };
  if (duration !== undefined) payload.duration = duration;
  await createCase(guildId, payload).catch((err) =>
    logError(`AI auto-mod: createCase (${action}) failed`, { error: err?.message }),
  );
}

/**
 * Execute the moderation action on the offending message/member.
 * Each Discord API call is best-effort: failures are logged and the flow
 * continues so the flag embed is always attempted.
 *
 * @param {import('discord.js').Message} message - The flagged message
 * @param {import('discord.js').Client} client - Discord client
 * @param {Object} result - Analysis result
 * @param {Object} autoModConfig - AI auto-mod config
 * @param {Object} _guildConfig - Full guild config (currently unused)
 */
async function executeAction(message, client, result, autoModConfig, _guildConfig) {
  const { member, guild } = message;

  const reason = `AI Auto-Mod: ${result.categories.join(', ')} — ${result.reason}`;
  const botId = client.user?.id ?? 'bot';
  const botTag = client.user?.tag ?? 'Bot#0000';

  // Delete exactly once: either autoDelete is on, or the chosen action is
  // 'delete' itself. (Previously autoDelete + 'delete' issued two delete
  // calls, the second always failing silently.)
  if (autoModConfig.autoDelete || result.action === 'delete') {
    await message.delete().catch(() => {});
  }

  switch (result.action) {
    case 'warn':
      if (!member || !guild) break;
      await recordCase(guild.id, 'warn', member, botId, botTag, reason);
      break;

    case 'timeout': {
      if (!member || !guild) break;
      const durationMs = autoModConfig.timeoutDurationMs ?? DEFAULTS.timeoutDurationMs;
      await member
        .timeout(durationMs, reason)
        .catch((err) =>
          logError('AI auto-mod: timeout failed', { userId: member.user.id, error: err?.message }),
        );
      await recordCase(guild.id, 'timeout', member, botId, botTag, reason, `${durationMs}ms`);
      break;
    }

    case 'kick':
      if (!member || !guild) break;
      await member
        .kick(reason)
        .catch((err) =>
          logError('AI auto-mod: kick failed', { userId: member.user.id, error: err?.message }),
        );
      await recordCase(guild.id, 'kick', member, botId, botTag, reason);
      break;

    case 'ban':
      if (!member || !guild) break;
      await guild.members
        .ban(member.user.id, { reason, deleteMessageSeconds: 0 })
        .catch((err) =>
          logError('AI auto-mod: ban failed', { userId: member.user.id, error: err?.message }),
        );
      await recordCase(guild.id, 'ban', member, botId, botTag, reason);
      break;

    // 'delete' is handled by the unified delete above; 'flag'/'none' fall
    // through to the review embed only.
    default:
      break;
  }

  await sendFlagEmbed(message, client, result, autoModConfig).catch((err) =>
    logError('AI auto-mod: sendFlagEmbed failed', { error: err?.message }),
  );
}
318

319
/**
 * Check a Discord message with AI auto-moderation.
 * Returns early (no action) for bots, exempt users, or disabled config.
 *
 * @param {import('discord.js').Message} message - Incoming Discord message
 * @param {import('discord.js').Client} client - Discord client
 * @param {Object} guildConfig - Merged guild config
 * @returns {Promise<{flagged: boolean, action?: string, categories?: string[]}>}
 */
export async function checkAiAutoMod(message, client, guildConfig) {
  const aiConfig = getAiAutoModConfig(guildConfig);

  // Cheap short-circuits before any API spend: feature off, bot authors,
  // and globally-exempt users are never analyzed.
  if (!aiConfig.enabled) return { flagged: false };
  if (message.author.bot) return { flagged: false };
  if (isExempt(message, guildConfig)) return { flagged: false };

  // Guild-configured exempt roles.
  const exemptRoles = aiConfig.exemptRoleIds ?? [];
  if (exemptRoles.length > 0 && message.member) {
    const roleExempt = message.member.roles.cache.some((role) => exemptRoles.includes(role.id));
    if (roleExempt) return { flagged: false };
  }

  // Nothing to analyze.
  if (!message.content || message.content.trim().length === 0) {
    return { flagged: false };
  }

  try {
    const analysis = await analyzeMessage(message.content, aiConfig);

    if (!analysis.flagged) return { flagged: false };

    warn('AI auto-mod: flagged message', {
      userId: message.author.id,
      guildId: message.guild?.id,
      categories: analysis.categories,
      action: analysis.action,
      scores: analysis.scores,
    });

    info('AI auto-mod: executing action', {
      action: analysis.action,
      userId: message.author.id,
    });

    await executeAction(message, client, analysis, aiConfig, guildConfig);

    return { flagged: true, action: analysis.action, categories: analysis.categories };
  } catch (err) {
    // Analysis failures (API errors, timeouts) must never block the message.
    logError('AI auto-mod: analysis failed', {
      channelId: message.channel.id,
      userId: message.author.id,
      error: err?.message,
    });
    return { flagged: false };
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc