// FemScoreboard/discord/bot.ts
/**
* bot.ts
* Scans the chat for reactions and updates the leaderboard database.
*/
import {
AttachmentBuilder,
Client,
Collection,
EmbedBuilder,
Events,
GatewayIntentBits,
Interaction,
Message,
MessageFlags,
MessageReaction,
MessageType,
PartialMessageReaction,
Partials,
SlashCommandBuilder,
TextChannel,
User,
} from 'discord.js';
import fs = require('node:fs');
import path = require('node:path');
import fetch, { Blob as NodeFetchBlob } from 'node-fetch';
import tmp = require('tmp');
import { JSDOM } from 'jsdom';
import { logError, logInfo, logWarn } from '../logging';
import {
db,
openDb,
reactionEmojis,
recordReaction,
requestRVCResponse,
requestTTSResponse,
serializeMessageHistory,
sync,
REAL_NAMES,
LOSER_WHITELIST,
} from './util';
import 'dotenv/config';
import { LLMConfig } from './commands/types';
import { LLMProvider, StreamingChunk } from './provider/provider';
import {
createStatusEmbed,
getRandomLoadingEmoji,
getRandomKawaiiPhrase,
KAWAII_PHRASES,
fetchMotd,
dateToSnowflake,
sendBiggestLoserAnnouncement,
triggerThrowback,
} from './commands/helpers';
/**
 * Shared accessor state populated at startup: command modules may export a
 * `state` accessor which the loader registers here under the command's name.
 */
interface State {
    llmconf?(): LLMConfig;
    provider?(): LLMProvider;
    sysprompt?(): string;
    config?(): LLMConfig;
    // Accessors are registered dynamically by command name in the startup
    // loader (`state[command.data.name] = command.state`), so an index
    // signature is required for that keyed assignment to type-check.
    [key: string]: (() => unknown) | undefined;
}
const state: State = {};
/**
 * Parse loading emojis from environment variable
 * Format: "<:clueless:123>,<a:hachune:456>,..."
 * Re-exported from helpers for backwards compatibility
 *
 * Falls back to a small set of unicode emojis when unconfigured.
 */
function parseLoadingEmojis(): string[] {
    const raw = process.env.LOADING_EMOJIS || '';
    if (raw.trim().length === 0) {
        // Default fallback emojis if not configured
        return ['🤔', '✨', '🎵'];
    }
    const emojis: string[] = [];
    for (const entry of raw.split(',')) {
        const trimmed = entry.trim();
        if (trimmed.length > 0) {
            emojis.push(trimmed);
        }
    }
    return emojis;
}
/**
 * Parse reaction guild IDs from environment variable
 * Format: "123456789,987654321,..."
 *
 * Reads REACTION_GUILDS, falling back to the legacy GUILD variable; returns
 * an empty set (after warning) when neither is configured.
 */
function parseReactionGuilds(): Set<string> {
    const configured = process.env.REACTION_GUILDS || process.env.GUILD || '';
    if (configured.trim().length === 0) {
        logWarn('[bot] No REACTION_GUILDS or GUILD configured, reactions will not be counted.');
        return new Set();
    }
    const guilds = new Set(
        configured
            .split(',')
            .map((id) => id.trim())
            .filter((id) => id.length > 0)
    );
    logInfo(`[bot] Configured reaction guilds: ${[...guilds].join(', ')}`);
    return guilds;
}
// Guilds whose reactions feed the scoreboard, resolved once at module load.
const reactionGuilds = parseReactionGuilds();
/**
 * Client extended with the slash-command registry and the shared LLM
 * accessors that command modules read at execution time.
 */
interface CommandClient extends Client {
commands?: Collection<
string,
{ data: SlashCommandBuilder; execute: (interaction: Interaction) => Promise<void> }
>;
llmconf?: () => LLMConfig;
provider?: () => LLMProvider;
sysprompt?: () => string;
}
// Gateway intents: guild metadata, messages + their content (LLM context),
// and reactions (scoreboard). Partials are required so reaction events are
// delivered for messages/channels that are not in the local cache.
const client: CommandClient = new Client({
intents: [
GatewayIntentBits.Guilds,
GatewayIntentBits.GuildMessages,
GatewayIntentBits.GuildMessageReactions,
GatewayIntentBits.MessageContent,
],
partials: [Partials.Message, Partials.Channel, Partials.Reaction],
});
// Slash-command registry, populated by the startup loader.
client.commands = new Collection();
// Once the gateway is ready, log the configured reaction and loading emojis.
client.once(Events.ClientReady, async () => {
logInfo('[bot] Ready.');
for (let i = 0; i < reactionEmojis.length; ++i) {
// Extract emoji name from config (handle both unicode and custom emoji formats)
// Custom emoji configs look like "<:name:id>"; split(':')[1] yields the name.
const emojiConfig = reactionEmojis[i];
const emojiName = emojiConfig.includes(':') ? emojiConfig.split(':')[1] : emojiConfig;
logInfo(`[bot] util: reaction_${i + 1} = ${emojiName}`);
}
const loadingEmojis = parseLoadingEmojis();
logInfo(`[bot] Loaded ${loadingEmojis.length} loading emojis: ${loadingEmojis.join(', ')}`);
});
/**
 * Shared handler for MessageReactionAdd/MessageReactionRemove.
 * Resolves partial structures, filters to the configured guilds, then
 * records the new reaction tally via recordReaction().
 *
 * @param reaction reaction whose count changed (possibly a partial)
 * @param user user who added/removed the reaction (unused; kept for the event signature)
 */
async function onMessageReactionChanged(
    reaction: MessageReaction | PartialMessageReaction,
    user: User
) {
    // When a reaction is received, check if the structure is partial
    if (reaction.partial) {
        // If the message this reaction belongs to was removed, the fetching might result in an API error which should be handled
        try {
            await reaction.fetch();
        } catch (error) {
            logError('[bot] Something went wrong when fetching the reaction:', error);
            // Return as `reaction.message.author` may be undefined/null
            return;
        }
    }
    if (reaction.message.partial) {
        // If the message this reaction belongs to was removed, the fetching might result in an API error which should be handled
        try {
            await reaction.message.fetch();
        } catch (error) {
            logError('[bot] Something went wrong when fetching the message:', error);
            // Return as `reaction.message.author` may be undefined/null
            return;
        }
    }
    // Only count reactions from the configured guilds. `guildId` is null
    // outside of guilds (e.g. DMs), which must never match a configured guild
    // — the explicit guard also keeps Set<string>.has() strictly typed.
    const guildId = reaction.message.guildId;
    if (!guildId || !reactionGuilds.has(guildId)) {
        return;
    }
    // Now the message has been cached and is fully available
    logInfo(
        `[bot] ${reaction.message.author?.id}'s message reaction count changed: ${reaction.emoji.name}x${reaction.count}`
    );
    await recordReaction(<MessageReaction>reaction);
}
/**
 * True for ordinary and reply messages that carry visible text content —
 * the only kinds of message considered for the LLM conversation history.
 */
function textOnlyMessages(message: Message) {
    if (message.cleanContent.length === 0) {
        return false;
    }
    return message.type === MessageType.Default || message.type === MessageType.Reply;
}
// Maximum number of characters accepted in a generated reply.
const MAX_RESPONSE_LENGTH = 4000;

/** A usable LLM reply: non-empty and within the length budget. */
function isGoodResponse(response: string) {
    const { length } = response;
    return length > 0 && length <= MAX_RESPONSE_LENGTH;
}
/**
 * MessageCreate handler.
 *
 * Voice messages are "mikuified" via RVC and the converted audio is sent as
 * a reply. Text messages may trigger an LLM reply: always when the bot is
 * mentioned or the text contains "miku", otherwise with a configured random
 * chance. Recent channel history is supplied to the provider as context, and
 * a status embed shows streaming progress while the reply is generated.
 *
 * @param message the newly created message (bot authors are ignored)
 */
async function onNewMessage(message: Message) {
if (message.author.bot) {
return;
}
/** First, handle audio messages */
if (message.flags.has(MessageFlags.IsVoiceMessage)) {
try {
const audio = await requestRVCResponse(message.attachments.first()!);
const audioBuf = await audio.arrayBuffer();
const audioFile = new AttachmentBuilder(Buffer.from(audioBuf)).setName('mikuified.wav');
await message.reply({
files: [audioFile],
});
} catch (err) {
logError(`[bot] Failed to generate audio message reply: ${err}`);
}
}
/** Text messages */
if (!textOnlyMessages(message)) {
return;
}
// Miku must reply when spoken to
// NOTE(review): CLIENT is presumably the bot's user/application ID — confirm.
const mustReply =
message.mentions.has(process.env.CLIENT!) ||
message.cleanContent.toLowerCase().includes('miku');
// Fetch msg_context - 1 messages preceding this one as LLM context.
const history = await message.channel.messages.fetch({
limit: state.llmconf!().msg_context - 1,
before: message.id,
});
// change Miku's message probability depending on current message frequency
// (fetch returns newest-first; reverse to chronological order)
const historyMessages = [...history.values()].reverse();
//const historyTimes = historyMessages.map((m: Message) => m.createdAt.getTime());
//const historyAvgDelayMins = (historyTimes[historyTimes.length - 1] - historyTimes[0]) / 60000;
// True with probability REPLY_CHANCE: floor(r / c) === 0 iff r < c for r in [0, 1).
const replyChance = Math.floor((Math.random() * 1) / Number(process.env.REPLY_CHANCE)) === 0;
const willReply = mustReply || replyChance;
if (!willReply) {
return;
}
/*
const cleanHistory = historyMessages.filter(textOnlyMessages);
const cleanHistoryList = [
...cleanHistory,
message
];
*/
// Chronological history plus the triggering message itself.
const cleanHistoryList = [...historyMessages, message];
try {
// Pick a random loading emoji and phrase for this generation
const loadingEmoji = getRandomLoadingEmoji();
const loadingPhrase = getRandomKawaiiPhrase();
const loadingEmbed = createStatusEmbed(loadingEmoji, loadingPhrase, 'Starting...');
const loadingMsg = await message.reply({ embeds: [loadingEmbed] });
// Check if provider supports streaming
const provider = state.provider!();
const useStreaming = provider.requestLLMResponseStreaming && state.llmconf!().streaming;
logInfo(
`[bot] Provider: ${provider.name()}, streaming supported: ${!!provider.requestLLMResponseStreaming}, streaming enabled: ${useStreaming}`
);
if (useStreaming) {
// Use streaming - accumulate all chunks, show only the delta (newest piece) in embed
let lastUpdateTime = Date.now();
const updateIntervalMs = 1500; // Update every ~1.5 seconds
let fullContent = '';
let previousContent = '';
let chunkCount = 0;
try {
const stream = provider.requestLLMResponseStreaming(
cleanHistoryList,
state.sysprompt!(),
state.llmconf!()
);
for await (const chunk of stream) {
chunkCount++;
// Accumulate all content for final response
// NOTE(review): plain assignment, not append — chunks appear to carry the
// full accumulated text so far; confirm against the provider implementation.
if (chunk.content) {
fullContent = chunk.content;
}
// Update embed periodically if we have new content
const now = Date.now();
if (fullContent && now - lastUpdateTime >= updateIntervalMs) {
// Get only the delta (new piece since last update)
const delta = fullContent.slice(previousContent.length);
if (delta) {
// Strip newlines and show delta in code block within embed
const singleLine = delta.replace(/\n/g, ' ');
const statusEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
`Generating response...\n\`\`\`${singleLine}\`\`\``
);
await loadingMsg.edit({ embeds: [statusEmbed] });
lastUpdateTime = now;
previousContent = fullContent;
}
}
}
logInfo(
`[bot] Streaming complete: ${chunkCount} chunks, content=${fullContent.length} chars`
);
// Extract final response by stripping <think>...</think> blocks
const finalResponse = fullContent.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
// Generation complete - update status and send final response
if (isGoodResponse(finalResponse)) {
// Success - delete loading embed and send final response as plaintext reply
await loadingMsg.delete();
await message.reply(finalResponse);
} else {
// Response exceeded max length - update embed with error message
const errorEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
'Oops! The voices in my head rambled on for too long... 😭\n(Reasoning trace exceeded max token budget)'
);
await loadingMsg.edit({ embeds: [errorEmbed] });
logWarn(`[bot] Burning bad response: "${finalResponse}"`);
}
} catch (streamErr) {
logError(`[bot] Streaming error: ${streamErr}`);
const errorEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
`Oops! Something went wrong while I was thinking... 😭\n\`${streamErr}\``
);
await loadingMsg.edit({ embeds: [errorEmbed] });
}
} else {
// Fallback to non-streaming method
// NOTE(review): unlike the streaming path, <think>...</think> blocks are not
// stripped here — confirm whether the provider already removes them.
const response = await provider.requestLLMResponse(
cleanHistoryList,
state.sysprompt!(),
state.llmconf!()
);
if (isGoodResponse(response)) {
await loadingMsg.delete();
await message.reply(response);
} else {
// Response exceeded max length - update embed with error message
const errorEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
'Oops! The voices in my head rambled on for too long... 😭\n(Reasoning trace exceeded max token budget)'
);
await loadingMsg.edit({ embeds: [errorEmbed] });
logWarn(`[bot] Burning bad response: "${response}"`);
}
}
} catch (err) {
logError(`[bot] Error while generating LLM response: ${err}`);
}
}
/**
 * Posts a "message of the day" (with TTS audio when available) to the
 * MOTD_CHANNEL, then re-schedules itself 2-8 hours out.
 *
 * @param firstTime when true, only schedules the next run without posting —
 *                  used at startup so the bot doesn't post immediately.
 */
async function scheduleRandomMessage(firstTime = false) {
    if (!firstTime) {
        if (!process.env.MOTD_CHANNEL) {
            return;
        }
        // channels.fetch() rejects (rather than returning null) for unknown
        // IDs, so the failure must be caught or the rejection goes unhandled.
        let channel: TextChannel | null = null;
        try {
            channel = <TextChannel>await client.channels.fetch(process.env.MOTD_CHANNEL);
        } catch (err) {
            logWarn(`[bot] Failed to fetch channel ${process.env.MOTD_CHANNEL}: ${err}`);
        }
        if (!channel) {
            logWarn(`[bot] Channel ${process.env.MOTD_CHANNEL} not found, disabling MOTD.`);
            return;
        }
        const randomMessage = await fetchMotd();
        if (randomMessage) {
            try {
                const audio = await requestTTSResponse(randomMessage);
                const audioBuf = await audio.arrayBuffer();
                const audioFile = new AttachmentBuilder(Buffer.from(audioBuf)).setName(
                    'mikuified.wav'
                );
                await channel.send({
                    content: randomMessage,
                    files: [audioFile],
                });
                logInfo(`[bot] Sent MOTD + TTS: ${randomMessage}`);
            } catch (err) {
                // TTS is best-effort: fall back to plain text.
                await channel.send(randomMessage);
                logWarn(`[bot] Could not fetch MOTD TTS: ${err}`);
                logInfo(`[bot] Sent text MOTD: ${randomMessage}`);
            }
        } else {
            logWarn(`[bot] Could not fetch MOTD.`);
        }
    }
    // wait between 2-8 hours
    const timeoutMins = Math.random() * 360 + 120;
    const scheduledTime = new Date();
    scheduledTime.setMinutes(scheduledTime.getMinutes() + timeoutMins);
    logInfo(`[bot] Next MOTD: ${scheduledTime.toLocaleTimeString()}`);
    setTimeout(scheduleRandomMessage, timeoutMins * 60 * 1000);
}
/**
 * Posts a daily "throwback" message to THROWBACK_CHANNEL via
 * triggerThrowback(), then re-schedules itself roughly 24 hours out.
 *
 * @param firstTime when true, only schedules the next run without posting.
 */
async function scheduleThrowback(firstTime = false) {
    if (!firstTime) {
        if (!process.env.THROWBACK_CHANNEL) {
            logWarn('[bot] THROWBACK_CHANNEL not configured, disabling throwback.');
            return;
        }
        // channels.fetch() rejects (rather than returning null) for unknown
        // IDs, so the failure must be caught or the rejection goes unhandled.
        let channel: TextChannel | null = null;
        try {
            channel = <TextChannel>await client.channels.fetch(process.env.THROWBACK_CHANNEL);
        } catch (err) {
            logWarn(`[bot] Failed to fetch channel ${process.env.THROWBACK_CHANNEL}: ${err}`);
        }
        if (!channel) {
            logWarn(
                `[bot] Channel ${process.env.THROWBACK_CHANNEL} not found, disabling throwback.`
            );
            return;
        }
        try {
            await triggerThrowback(
                client,
                channel,
                channel,
                state.provider!(),
                state.sysprompt!(),
                state.llmconf!()
            );
        } catch (err) {
            logError(`[bot] Error fetching throwback message: ${err}`);
        }
    }
    // Schedule next throwback in ~24 hours (with some randomness: 22-26 hours)
    const timeoutHours = 22 + Math.random() * 4;
    const scheduledTime = new Date();
    scheduledTime.setHours(scheduledTime.getHours() + timeoutHours);
    logInfo(`[bot] Next throwback: ${scheduledTime.toLocaleString()}`);
    setTimeout(scheduleThrowback, timeoutHours * 60 * 60 * 1000);
}
/**
 * Announces the "biggest loser" in LOSER_CHANNEL, then re-schedules itself
 * for the next 9 AM local time.
 *
 * @param firstTime when true, only schedules the next run without posting.
 */
async function scheduleBiggestLoser(firstTime = false) {
    if (!firstTime) {
        if (!process.env.LOSER_CHANNEL) {
            logWarn('[bot] LOSER_CHANNEL not configured, disabling biggest loser announcement.');
            return;
        }
        // channels.fetch() rejects (rather than returning null) for unknown
        // IDs, so the failure must be caught or the rejection goes unhandled.
        let channel: TextChannel | null = null;
        try {
            channel = <TextChannel>await client.channels.fetch(process.env.LOSER_CHANNEL);
        } catch (err) {
            logWarn(`[bot] Failed to fetch channel ${process.env.LOSER_CHANNEL}: ${err}`);
        }
        if (channel) {
            try {
                const declaration = await sendBiggestLoserAnnouncement(
                    client,
                    channel,
                    channel.guild.id
                );
                logInfo(`[bot] Declaring biggest loser: ${declaration}`);
                await channel.send(declaration);
                await channel.send(
                    'https://tenor.com/view/klajumas-spit-skreplis-klajumas-skreplis-gif-13538828554330887910'
                );
            } catch (err) {
                logError(`[bot] Error finding biggest loser: ${err}`);
            }
        }
    }
    // Fire at the next 9 AM local time (tomorrow if 9 AM has already passed today).
    const now = new Date();
    const next9AM = new Date();
    next9AM.setHours(9, 0, 0, 0);
    if (now.getTime() >= next9AM.getTime()) {
        next9AM.setDate(next9AM.getDate() + 1);
    }
    const timeout = next9AM.getTime() - now.getTime();
    logInfo(`[bot] Next biggest loser announcement: ${next9AM.toLocaleString()}`);
    setTimeout(scheduleBiggestLoser, timeout);
}
// NOTE(review): a redundant no-op InteractionCreate listener (it only checked
// isChatInputCommand() and returned) was removed here; slash commands are
// dispatched by the InteractionCreate handler registered further down.
// Wire up the gateway event handlers.
client.on(Events.MessageCreate, onNewMessage);
// Add and remove share one handler: both re-record the reaction tally.
client.on(Events.MessageReactionAdd, onMessageReactionChanged);
client.on(Events.MessageReactionRemove, onMessageReactionChanged);
client.on(Events.InteractionCreate, async (interaction) => {
if (!interaction.isChatInputCommand()) return;
const client: CommandClient = interaction.client;
const command = client.commands?.get(interaction.commandName);
if (!command) {
logError(`[bot] No command matching ${interaction.commandName} was found.`);
return;
}
try {
await command.execute(interaction);
} catch (error) {
logError(error);
if (interaction.replied || interaction.deferred) {
await interaction.followUp({
content: 'There was an error while executing this command!',
ephemeral: true,
});
} else {
await interaction.reply({
content: 'There was an error while executing this command!',
ephemeral: true,
});
}
}
});
// startup
// Bootstraps the bot: opens/migrates the database, loads slash-command
// modules from ./commands/<folder>/*.js, exposes shared accessors on the
// client, logs in, and starts the optional schedulers.
(async () => {
tmp.setGracefulCleanup();
logInfo('[db] Opening...');
await openDb();
logInfo('[db] Migrating...');
await db.migrate();
logInfo('[db] Ready.');
logInfo('[bot] Loading commands...');
const foldersPath = path.join(__dirname, 'commands');
const commandFolders = fs.readdirSync(foldersPath, { withFileTypes: true });
for (const folder of commandFolders) {
if (!folder.isDirectory()) {
continue;
}
const commandsPath = path.join(foldersPath, folder.name);
// Only compiled .js output is loaded, not .ts sources.
const commandFiles = fs.readdirSync(commandsPath).filter((file) => file.endsWith('.js'));
for (const file of commandFiles) {
const filePath = path.join(commandsPath, file);
const command = require(filePath);
client.commands?.set(command.data.name, command);
// Command modules may export a `state` accessor, registered under the
// command's name.
// NOTE(review): State declares no index signature, so this keyed
// assignment may not compile under strict TS — confirm compiler settings.
if (command.state) {
state[command.data.name] = command.state;
}
logInfo(`[bot] Found command: /${command.data.name}`);
}
}
// Attach shared state to client for commands to access
// NOTE(review): these can return undefined if the providing command module
// did not load, despite the non-optional declared return types — confirm.
client.llmconf = () => state.llmconf?.() ?? state.config?.();
client.provider = () => state.provider?.();
client.sysprompt = () => state.sysprompt?.();
logInfo('[bot] Logging in...');
await client.login(process.env.TOKEN);
// NOTE(review): truthiness checks — the literal string "false" would still
// enable these features; confirm the expected env values.
if (process.env.ENABLE_MOTD) {
await scheduleRandomMessage(true);
}
if (process.env.ENABLE_THROWBACK) {
await scheduleThrowback(true);
}
if (process.env.ENABLE_LOSER) {
await scheduleBiggestLoser(true);
}
await sync(client.guilds);
})();