diff --git a/discord/.env.example b/discord/.env.example
index 48f22f2..052ff42 100644
--- a/discord/.env.example
+++ b/discord/.env.example
@@ -25,3 +25,9 @@ THROWBACK_CHANNEL="123456789012345678"
ENABLE_LOSER=1
LOSER_CHANNEL="123456789012345678"
+
+# Real name mappings (format: "username:FirstName,username2:FirstName2")
+REAL_NAMES=""
+
+# Whitelist of first names for biggest loser announcement
+LOSER_WHITELIST="James,Vincent,Myles,Sam"
diff --git a/discord/__tests__/bot.test.ts b/discord/__tests__/bot.test.ts
index 7ef03ca..428dc51 100644
--- a/discord/__tests__/bot.test.ts
+++ b/discord/__tests__/bot.test.ts
@@ -66,7 +66,7 @@ function formatLoadingMessage(emoji: string, reasoning: string): string {
];
const phrase = kawaiiPhrases[Math.floor(Math.random() * kawaiiPhrases.length)];
- let content = `${emoji} ${phrase}`;
+ let content = `${emoji}\n${phrase}`;
if (reasoning && reasoning.trim().length > 0) {
const displayReasoning =
reasoning.length > 500 ? reasoning.slice(0, 500) + '...' : reasoning;
@@ -158,8 +158,10 @@ describe('bot.ts helper functions', () => {
});
describe('isGoodResponse', () => {
+ const MAX_RESPONSE_LENGTH = 4000;
+
function isGoodResponse(response: string): boolean {
- return response.length > 0;
+ return response.length > 0 && response.length <= MAX_RESPONSE_LENGTH;
}
it('should return true for non-empty responses', () => {
@@ -170,6 +172,21 @@ describe('bot.ts helper functions', () => {
it('should return false for empty responses', () => {
expect(isGoodResponse('')).toBe(false);
});
+
+ it('should return true for responses at exactly 4000 characters', () => {
+ const response = 'a'.repeat(4000);
+ expect(isGoodResponse(response)).toBe(true);
+ });
+
+ it('should return false for responses exceeding 4000 characters', () => {
+ const response = 'a'.repeat(4001);
+ expect(isGoodResponse(response)).toBe(false);
+ });
+
+ it('should return false for responses significantly exceeding 4000 characters', () => {
+ const response = 'a'.repeat(5000);
+ expect(isGoodResponse(response)).toBe(false);
+ });
});
describe('parseLoadingEmojis', () => {
diff --git a/discord/__tests__/util.test.ts b/discord/__tests__/util.test.ts
index ffaf52c..8a01946 100644
--- a/discord/__tests__/util.test.ts
+++ b/discord/__tests__/util.test.ts
@@ -4,7 +4,15 @@
*/
import { MessageReaction, User, Message, Attachment } from 'discord.js';
-import { openDb, recordReaction, serializeMessageHistory, REAL_NAMES } from '../util';
+import {
+ openDb,
+ recordReaction,
+ serializeMessageHistory,
+ REAL_NAMES,
+ LOSER_WHITELIST,
+ parseRealNames,
+ parseLoserWhitelist,
+} from '../util';
// Mock discord.js
jest.mock('discord.js', () => {
@@ -38,11 +46,60 @@ describe('util.ts', () => {
await openDb();
});
+ describe('parseRealNames', () => {
+ it('should parse REAL_NAMES from environment variable', () => {
+ const result = parseRealNames('user1:James,user2:Vincent,user3:Myles');
+ expect(result).toEqual({
+ user1: 'James',
+ user2: 'Vincent',
+ user3: 'Myles',
+ });
+ });
+
+ it('should return empty object when input is empty', () => {
+ const result = parseRealNames('');
+ expect(result).toEqual({});
+ });
+
+ it('should handle whitespace in entries', () => {
+ const result = parseRealNames(' user1:James , user2:Vincent ');
+ expect(result).toEqual({
+ user1: 'James',
+ user2: 'Vincent',
+ });
+ });
+
+ it('should skip malformed entries', () => {
+ const result = parseRealNames('user1:James,invalidEntry,user2:Vincent');
+ expect(result).toEqual({
+ user1: 'James',
+ user2: 'Vincent',
+ });
+ });
+ });
+
+ describe('parseLoserWhitelist', () => {
+ it('should parse LOSER_WHITELIST from environment variable', () => {
+ const result = parseLoserWhitelist('James,Vincent,Myles,Sam');
+ expect(result).toEqual(['James', 'Vincent', 'Myles', 'Sam']);
+ });
+
+ it('should return empty array when input is empty', () => {
+ const result = parseLoserWhitelist('');
+ expect(result).toEqual([]);
+ });
+
+ it('should handle whitespace in entries', () => {
+ const result = parseLoserWhitelist(' James , Vincent , Myles ');
+ expect(result).toEqual(['James', 'Vincent', 'Myles']);
+ });
+ });
+
describe('REAL_NAMES', () => {
it('should contain expected username mappings', () => {
- expect(REAL_NAMES.vinso1445).toBe('Vincent Iannelli');
- expect(REAL_NAMES.scoliono).toBe('James Shiffer');
- expect(REAL_NAMES.gnuwu).toBe('David Zheng');
+ expect(REAL_NAMES.vinso1445).toBe('Vincent');
+ expect(REAL_NAMES.scoliono).toBe('James');
+ expect(REAL_NAMES.gnuwu).toBe('David');
});
it('should include Hatsune Miku', () => {
@@ -50,6 +107,21 @@ describe('util.ts', () => {
});
});
+ describe('LOSER_WHITELIST', () => {
+ it('should contain the whitelisted first names', () => {
+ expect(LOSER_WHITELIST).toContain('James');
+ expect(LOSER_WHITELIST).toContain('Vincent');
+ expect(LOSER_WHITELIST).toContain('Myles');
+ expect(LOSER_WHITELIST).toContain('Sam');
+ });
+
+ it('should not contain non-whitelisted names', () => {
+ expect(LOSER_WHITELIST).not.toContain('David');
+ expect(LOSER_WHITELIST).not.toContain('Adam');
+ expect(LOSER_WHITELIST).not.toContain('Jake');
+ });
+ });
+
describe('serializeMessageHistory', () => {
it('should return undefined for messages without content', async () => {
const mockMessage = {
@@ -93,7 +165,7 @@ describe('util.ts', () => {
const result = await serializeMessageHistory(mockMessage);
- expect(result?.name).toBe('Vincent Iannelli');
+ expect(result?.name).toBe('Vincent');
});
it('should serialize reactions', async () => {
diff --git a/discord/bot.ts b/discord/bot.ts
index 76a8a14..9bded3d 100644
--- a/discord/bot.ts
+++ b/discord/bot.ts
@@ -8,6 +8,7 @@ import {
AttachmentBuilder,
Client,
Collection,
+ EmbedBuilder,
Events,
GatewayIntentBits,
Interaction,
@@ -37,6 +38,7 @@ import {
serializeMessageHistory,
sync,
REAL_NAMES,
+ LOSER_WHITELIST,
} from './util';
import 'dotenv/config';
import { LLMConfig } from './commands/types';
@@ -73,6 +75,18 @@ function getRandomLoadingEmoji(): string {
return emojis[Math.floor(Math.random() * emojis.length)];
}
+/**
+ * Create an embed for status updates during LLM generation
+ */
+function createStatusEmbed(emoji: string, phrase: string, status: string): EmbedBuilder {
+ // Miku teal color
+ return new EmbedBuilder()
+ .setColor(0x39c5bb)
+ .setAuthor({ name: phrase })
+ .setDescription(`${emoji}\n${status}`)
+ .setTimestamp();
+}
+
/**
* Format the loading message with emoji and reasoning content
*/
@@ -171,8 +185,10 @@ function textOnlyMessages(message: Message) {
);
}
+const MAX_RESPONSE_LENGTH = 4000;
+
function isGoodResponse(response: string) {
- return response.length > 0;
+ return response.length > 0 && response.length <= MAX_RESPONSE_LENGTH;
}
async function onNewMessage(message: Message) {
@@ -233,17 +249,36 @@ async function onNewMessage(message: Message) {
// Pick a random loading emoji for this generation
const loadingEmoji = getRandomLoadingEmoji();
- // Send initial loading message
- const loadingMsg = await message.reply(formatLoadingMessage(loadingEmoji, ''));
+ // Send initial loading message with embed
+ const kawaiiPhrases = [
+ 'Hmm... let me think~ ♪',
+ 'Processing nyaa~',
+ 'Miku is thinking...',
+ 'Calculating with magic ✨',
+ 'Pondering desu~',
+ 'Umm... one moment! ♪',
+ 'Brain go brrr~',
+ 'Assembling thoughts... ♪',
+ 'Loading Miku-brain...',
+ 'Thinking hard senpai~',
+ ];
+ const loadingPhrase = kawaiiPhrases[Math.floor(Math.random() * kawaiiPhrases.length)];
+ const loadingEmbed = createStatusEmbed(loadingEmoji, loadingPhrase, 'Starting...');
+ const loadingMsg = await message.reply({ embeds: [loadingEmbed] });
// Check if provider supports streaming
const provider = state.provider!();
+ logInfo(
+ `[bot] Provider: ${provider.name()}, streaming supported: ${!!provider.requestLLMResponseStreaming}`
+ );
+
if (provider.requestLLMResponseStreaming) {
- // Use streaming with reasoning updates
+ // Use streaming - accumulate all chunks, show only the delta (newest piece) in embed
let lastUpdateTime = Date.now();
- const updateIntervalMs = 3000; // Update every ~3 seconds
- let latestReasoning = '';
- let finalContent = '';
+ const updateIntervalMs = 1500; // Update every ~1.5 seconds
+ let fullContent = '';
+ let previousContent = '';
+ let chunkCount = 0;
try {
const stream = provider.requestLLMResponseStreaming(
@@ -253,49 +288,62 @@ async function onNewMessage(message: Message) {
);
for await (const chunk of stream) {
- // Update reasoning if present
- if (chunk.reasoning) {
- latestReasoning = chunk.reasoning;
- }
-
- // Track final content
+ chunkCount++;
+ // Accumulate all content for final response
if (chunk.content) {
- finalContent = chunk.content;
+ fullContent = chunk.content;
}
- // Update message periodically (only if reasoning changed and interval passed)
+ // Update embed periodically if we have new content
const now = Date.now();
- if (latestReasoning && now - lastUpdateTime >= updateIntervalMs) {
- await loadingMsg.edit(formatLoadingMessage(loadingEmoji, latestReasoning));
- lastUpdateTime = now;
+ if (fullContent && now - lastUpdateTime >= updateIntervalMs) {
+ // Get only the delta (new piece since last update)
+ const delta = fullContent.slice(previousContent.length);
+ if (delta) {
+ // Strip newlines and show delta in code block within embed
+ const singleLine = delta.replace(/\n/g, ' ');
+ const statusEmbed = createStatusEmbed(
+ loadingEmoji,
+ loadingPhrase,
+ `Generating response...\n\`\`\`${singleLine}\`\`\``
+ );
+ await loadingMsg.edit({ embeds: [statusEmbed] });
+ lastUpdateTime = now;
+ previousContent = fullContent;
+ }
}
}
- // Generation complete - check if we got stuck in reasoning
- if (latestReasoning && !isGoodResponse(finalContent)) {
- // Token budget exhausted during reasoning, never produced final content
- const errorMsg = 'Oops! I thought so hard I ran out of tokens... owo';
- logError(
- `[bot] Token budget exhausted during reasoning! Reasoning length: ${latestReasoning.length} chars, no final content produced.`
- );
- // Show the end of the reasoning trace (where it got stuck)
- const reasoningTail =
- latestReasoning.length > 300
- ? '...' + latestReasoning.slice(-300)
- : latestReasoning;
- await loadingMsg.edit(
- `${loadingEmoji} ${errorMsg}\n\n*Reasoning trace (end):*\n> ${reasoningTail}`
- );
- } else if (isGoodResponse(finalContent)) {
- // Success - edit message with final response (no reasoning)
- await loadingMsg.edit(finalContent);
- } else {
- logWarn(`[bot] Burning bad response: "${finalContent}"`);
+ logInfo(
+ `[bot] Streaming complete: ${chunkCount} chunks, content=${fullContent.length} chars`
+ );
+
+ // Extract final response by stripping <think>...</think> blocks
+ const finalResponse = fullContent.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
+
+ // Generation complete - update status and send final response
+ if (isGoodResponse(finalResponse)) {
+ // Success - delete loading embed and send final response as plaintext reply
await loadingMsg.delete();
+ await message.reply(finalResponse);
+ } else {
+ // Response exceeded max length - update embed with error message
+ const errorEmbed = createStatusEmbed(
+ loadingEmoji,
+ loadingPhrase,
+ 'Oops! The voices in my head rambled on for too long... 😭\n(Reasoning trace exceeded max token budget)'
+ );
+ await loadingMsg.edit({ embeds: [errorEmbed] });
+ logWarn(`[bot] Burning bad response: "${finalResponse}"`);
}
} catch (streamErr) {
logError(`[bot] Streaming error: ${streamErr}`);
- await loadingMsg.edit('Oops! Something went wrong while I was thinking... owo');
+ const errorEmbed = createStatusEmbed(
+ loadingEmoji,
+ loadingPhrase,
+ `Oops! Something went wrong while I was thinking... 😭\n\`${streamErr}\``
+ );
+ await loadingMsg.edit({ embeds: [errorEmbed] });
}
} else {
// Fallback to non-streaming method
@@ -306,10 +354,17 @@ async function onNewMessage(message: Message) {
);
if (isGoodResponse(response)) {
- await loadingMsg.edit(response);
- } else {
- logWarn(`[bot] Burning bad response: "${response}"`);
await loadingMsg.delete();
+ await message.reply(response);
+ } else {
+ // Response exceeded max length - update embed with error message
+ const errorEmbed = createStatusEmbed(
+ loadingEmoji,
+ loadingPhrase,
+ 'Oops! The voices in my head rambled on for too long... 😭\n(Reasoning trace exceeded max token budget)'
+ );
+ await loadingMsg.edit({ embeds: [errorEmbed] });
+ logWarn(`[bot] Burning bad response: "${response}"`);
}
}
} catch (err) {
@@ -505,10 +560,9 @@ async function scheduleBiggestLoser(firstTime = false) {
const startId = dateToSnowflake(yesterdayStart);
const endId = dateToSnowflake(yesterdayEnd);
- const deadNames = ['Adam Kazerounian', 'Jake Wong', 'David Zheng', 'Hatsune Miku'];
const realNameToCount = new Map();
for (const realName of new Set(Object.values(REAL_NAMES))) {
- if (!deadNames.includes(realName as string)) {
+ if (LOSER_WHITELIST.includes(realName as string)) {
realNameToCount.set(realName as string, 0);
}
}
diff --git a/discord/provider/openai.ts b/discord/provider/openai.ts
index f01b52e..7862aa6 100644
--- a/discord/provider/openai.ts
+++ b/discord/provider/openai.ts
@@ -144,13 +144,19 @@ export class OpenAIProvider implements LLMProvider {
let fullContent = '';
let reasoningContent = '';
+ let chunkCount = 0;
for await (const chunk of stream) {
+ chunkCount++;
const delta = chunk.choices[0]?.delta;
// Handle reasoning content if present (some models include it)
- if ('reasoning_content' in delta && delta.reasoning_content) {
- reasoningContent += delta.reasoning_content;
+ // Also check for 'reasoning' field which some OpenAI-compatible APIs use
+ const reasoningDelta =
+ ('reasoning_content' in delta && delta.reasoning_content) ||
+ ('reasoning' in delta && delta.reasoning);
+ if (reasoningDelta) {
+ reasoningContent += reasoningDelta;
yield { reasoning: reasoningContent };
}
@@ -161,6 +167,10 @@ export class OpenAIProvider implements LLMProvider {
}
}
+ logInfo(
+ `[openai] Streaming complete: ${chunkCount} chunks, ${fullContent.length} chars`
+ );
+
// Strip <think> tags if present
if (fullContent.lastIndexOf('</think>') > -1) {
fullContent = fullContent.slice(fullContent.lastIndexOf('</think>') + 8);
diff --git a/discord/util.ts b/discord/util.ts
index 041562b..bae6871 100644
--- a/discord/util.ts
+++ b/discord/util.ts
@@ -26,21 +26,47 @@ import { LLMDiscordMessage } from './provider/provider';
const reactionEmojis: string[] = process.env.REACTIONS.split(',');
let db: Database = null;
-const REAL_NAMES = {
- // username to real name mapping
- vinso1445: 'Vincent Iannelli',
- scoliono: 'James Shiffer',
- drugseller88: 'James Shiffer',
- gnuwu: 'David Zheng',
- f0oby: 'Myles Linden',
- bapazheng: 'Myles Linden',
- bapabakshi: 'Myles Linden',
- keliande27: 'Myles Linden',
- '1thinker': 'Samuel Habib',
- adam28405: 'Adam Kazerounian',
- 'shibe.mp4': 'Jake Wong',
- 'Hatsune Miku': 'Hatsune Miku',
-};
+/**
+ * Parse REAL_NAMES from environment variable
+ * Format: "username:Name,username2:Name2,..."
+ */
+function parseRealNames(input?: string): Record<string, string> {
+ const realNamesStr = input !== undefined ? input : process.env.REAL_NAMES || '';
+ if (!realNamesStr.trim()) {
+ return {};
+ }
+ const realNames: Record<string, string> = {};
+ realNamesStr.split(',').forEach((entry) => {
+ const parts = entry.split(':');
+ if (parts.length === 2) {
+ const username = parts[0].trim();
+ const name = parts[1].trim();
+ if (username && name) {
+ realNames[username] = name;
+ }
+ }
+ });
+ return realNames;
+}
+
+const REAL_NAMES = parseRealNames();
+
+/**
+ * Parse LOSER_WHITELIST from environment variable
+ * Format: "Name1,Name2,Name3,..."
+ */
+function parseLoserWhitelist(input?: string): string[] {
+ const whitelistStr = input !== undefined ? input : process.env.LOSER_WHITELIST || '';
+ if (!whitelistStr.trim()) {
+ return [];
+ }
+ return whitelistStr
+ .split(',')
+ .map((name) => name.trim())
+ .filter((name) => name.length > 0);
+}
+
+const LOSER_WHITELIST = parseLoserWhitelist();
async function openDb() {
db = await open({
@@ -282,4 +308,7 @@ export {
serializeMessageHistory,
sync,
REAL_NAMES,
+ LOSER_WHITELIST,
+ parseRealNames,
+ parseLoserWhitelist,
};