/*
 * Files
 * FemScoreboard/discord/bot.ts
 * 816 lines
 * 32 KiB
 * TypeScript
 *
 * (NOTE: the above is file-listing metadata accidentally pasted into the
 * source; it is preserved here as a comment so the file compiles.)
 */
/**
* bot.ts
* Scans the chat for reactions and updates the leaderboard database.
*/
import {
Attachment,
AttachmentBuilder,
Client,
Collection,
EmbedBuilder,
Events,
GatewayIntentBits,
Interaction,
Message,
MessageFlags,
MessageReaction,
MessageType,
PartialMessageReaction,
Partials,
SlashCommandBuilder,
TextChannel,
User,
} from 'discord.js';
import fs = require('node:fs');
import path = require('node:path');
import fetch, { Blob as NodeFetchBlob } from 'node-fetch';
import FormData = require('form-data');
import tmp = require('tmp');
import { JSDOM } from 'jsdom';
import { logError, logInfo, logWarn } from '../logging';
import {
db,
openDb,
reactionEmojis,
recordReaction,
requestTTSResponse,
serializeMessageHistory,
sync,
REAL_NAMES,
LOSER_WHITELIST,
} from './util';
import 'dotenv/config';
import { LLMConfig } from './commands/types';
import { LLMProvider, StreamingChunk } from './provider/provider';
/**
 * Runtime-configurable hooks registered by slash-command modules at startup
 * (see the startup loader at the bottom of this file). Each entry is a
 * getter so commands can change configuration while the bot is running.
 */
interface State {
    llmconf?: () => LLMConfig;
    provider?: () => LLMProvider;
    sysprompt?: () => string;
}

// Populated by the command loader during startup.
const state: State = {};
/**
 * Parse loading emojis from the LOADING_EMOJIS environment variable.
 * Format: "<:clueless:123>,<a:hachune:456>,..."
 *
 * @returns a non-empty list of emoji strings; falls back to a default set
 *          when the variable is unset OR when parsing yields no entries, so
 *          callers can always safely index into the result.
 */
function parseLoadingEmojis(): string[] {
    const DEFAULT_EMOJIS = ['🤔', '✨', '🎵'];
    const emojiStr = process.env.LOADING_EMOJIS || '';
    const parsed = emojiStr
        .split(',')
        .map((e) => e.trim())
        .filter((e) => e.length > 0);
    // Guard against inputs like ",,," which pass a bare trim check but parse
    // to an empty list — previously that made getRandomLoadingEmoji()
    // return undefined.
    return parsed.length > 0 ? parsed : DEFAULT_EMOJIS;
}
/**
 * Pick one loading emoji uniformly at random from the configured list.
 */
function getRandomLoadingEmoji(): string {
    const pool = parseLoadingEmojis();
    const idx = Math.floor(Math.random() * pool.length);
    return pool[idx];
}
/**
 * Build the status embed shown while an LLM response is being generated.
 *
 * @param emoji  loading emoji rendered at the top of the description
 * @param phrase short flavour phrase shown as the embed author
 * @param status current progress text
 */
function createStatusEmbed(emoji: string, phrase: string, status: string): EmbedBuilder {
    const embed = new EmbedBuilder();
    embed.setColor(0x39c5bb); // Miku teal
    embed.setAuthor({ name: phrase });
    embed.setDescription(`${emoji}\n${status}`);
    embed.setTimestamp();
    return embed;
}
/**
 * Compose the placeholder text shown while waiting for a response: the
 * loading emoji plus a random kawaii phrase, and — when reasoning text is
 * available — a truncated preview quoted underneath.
 */
function formatLoadingMessage(emoji: string, reasoning: string): string {
    const kawaiiPhrases = [
        'Hmm... let me think~ ♪',
        'Processing nyaa~',
        'Miku is thinking...',
        'Calculating with magic ✨',
        'Pondering desu~',
        'Umm... one moment! ♪',
        'Brain go brrr~',
        'Assembling thoughts... ♪',
        'Loading Miku-brain...',
        'Thinking hard senpai~',
    ];
    const pick = Math.floor(Math.random() * kawaiiPhrases.length);
    const parts = [`${emoji} ${kawaiiPhrases[pick]}`];
    if (reasoning && reasoning.trim().length > 0) {
        // Cap the preview at 500 chars so the message stays compact.
        const preview =
            reasoning.length > 500 ? `${reasoning.slice(0, 500)}...` : reasoning;
        parts.push(`\n> ${preview}`);
    }
    return parts.join('\n');
}
/**
 * Discord client extended with a registry of loaded slash commands, keyed
 * by command name. Filled in by the startup loader.
 */
interface CommandClient extends Client {
    commands?: Collection<
        string,
        {
            data: SlashCommandBuilder;
            execute: (interaction: Interaction) => Promise<void>;
        }
    >;
}
// Gateway intents needed to observe guild messages, their content, and
// reactions; partials let us receive events for uncached messages/reactions.
const botIntents = [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.GuildMessageReactions,
    GatewayIntentBits.MessageContent,
];
const botPartials = [Partials.Message, Partials.Channel, Partials.Reaction];
const client: CommandClient = new Client({
    intents: botIntents,
    partials: botPartials,
});
client.commands = new Collection();
// On connect, log the configured reaction emojis and loading emojis.
client.once(Events.ClientReady, async () => {
    logInfo('[bot] Ready.');
    reactionEmojis.forEach((emojiConfig, i) => {
        // Custom emojis look like "<:name:id>"; unicode ones are used as-is.
        const emojiName = emojiConfig.includes(':') ? emojiConfig.split(':')[1] : emojiConfig;
        logInfo(`[bot] util: reaction_${i + 1} = ${emojiName}`);
    });
    const loadingEmojis = parseLoadingEmojis();
    logInfo(`[bot] Loaded ${loadingEmojis.length} loading emojis: ${loadingEmojis.join(', ')}`);
});
/**
 * Shared handler for reaction add/remove events: resolves partial
 * structures, then records the new reaction state in the database.
 */
async function onMessageReactionChanged(
    reaction: MessageReaction | PartialMessageReaction,
    user: User
) {
    // Partial structures must be fetched before their fields can be read.
    // If the underlying message was removed, fetching may reject with an API
    // error; in that case we bail out, since reaction.message.author may be
    // undefined/null.
    const hydrate = async (target: { fetch(): Promise<unknown> }, what: string) => {
        try {
            await target.fetch();
            return true;
        } catch (error) {
            logError(`[bot] Something went wrong when fetching the ${what}:`, error);
            return false;
        }
    };
    if (reaction.partial && !(await hydrate(reaction, 'reaction'))) {
        return;
    }
    if (reaction.message.partial && !(await hydrate(reaction.message, 'message'))) {
        return;
    }
    // Fully cached now; safe to read author/emoji/count.
    logInfo(
        `[bot] ${reaction.message.author?.id}'s message reaction count changed: ${reaction.emoji.name}x${reaction.count}`
    );
    await recordReaction(<MessageReaction>reaction);
}
/**
 * True for ordinary text messages (default posts and replies) whose visible
 * content is non-empty.
 */
function textOnlyMessages(message: Message) {
    if (message.cleanContent.length === 0) {
        return false;
    }
    return message.type === MessageType.Default || message.type === MessageType.Reply;
}
// Upper bound on an acceptable LLM response length, in characters.
const MAX_RESPONSE_LENGTH = 4000;

/** A usable LLM response is non-empty and fits within the length budget. */
function isGoodResponse(response: string) {
    const len = response.length;
    return len > 0 && len <= MAX_RESPONSE_LENGTH;
}
/**
 * Handle a newly created message.
 *
 * Flow: ignore bots; Miku-ify voice messages via RVC; then, for plain text
 * messages, decide whether to reply (always when mentioned or when "miku"
 * appears in the text, otherwise with probability REPLY_CHANCE) and generate
 * an LLM response, streaming progress into a status embed when the provider
 * supports it.
 */
async function onNewMessage(message: Message) {
// Never respond to bots (including ourselves) to avoid feedback loops.
if (message.author.bot) {
return;
}
/** First, handle audio messages */
if (message.flags.has(MessageFlags.IsVoiceMessage)) {
try {
// Voice messages carry the audio as the first attachment; send it through
// RVC and reply with the converted clip.
const audio = await requestRVCResponse(message.attachments.first()!);
const audioBuf = await audio.arrayBuffer();
const audioFile = new AttachmentBuilder(Buffer.from(audioBuf)).setName('mikuified.wav');
await message.reply({
files: [audioFile],
});
} catch (err) {
logError(`[bot] Failed to generate audio message reply: ${err}`);
}
}
/** Text messages */
if (!textOnlyMessages(message)) {
return;
}
// Miku must reply when spoken to
// NOTE(review): CLIENT presumably holds the bot's own user/application ID —
// confirm it is a value accepted by MessageMentions#has().
const mustReply =
message.mentions.has(process.env.CLIENT!) ||
message.cleanContent.toLowerCase().includes('miku');
// Fetch conversational context: msg_context - 1 messages preceding this one
// (the triggering message itself is appended below).
const history = await message.channel.messages.fetch({
limit: state.llmconf!().msg_context - 1,
before: message.id,
});
// change Miku's message probability depending on current message frequency
const historyMessages = [...history.values()].reverse();
//const historyTimes = historyMessages.map((m: Message) => m.createdAt.getTime());
//const historyAvgDelayMins = (historyTimes[historyTimes.length - 1] - historyTimes[0]) / 60000;
// floor(rand / REPLY_CHANCE) === 0 iff rand < REPLY_CHANCE, so this fires
// with probability REPLY_CHANCE (presumably a float in (0, 1] — confirm).
const replyChance = Math.floor((Math.random() * 1) / Number(process.env.REPLY_CHANCE)) === 0;
const willReply = mustReply || replyChance;
if (!willReply) {
return;
}
/*
const cleanHistory = historyMessages.filter(textOnlyMessages);
const cleanHistoryList = [
...cleanHistory,
message
];
*/
// Oldest-first history plus the triggering message itself.
const cleanHistoryList = [...historyMessages, message];
try {
// Pick a random loading emoji for this generation
const loadingEmoji = getRandomLoadingEmoji();
// Send initial loading message with embed
// NOTE(review): this phrase list duplicates the one inside
// formatLoadingMessage(); consider hoisting to a shared constant.
const kawaiiPhrases = [
'Hmm... let me think~ ♪',
'Processing nyaa~',
'Miku is thinking...',
'Calculating with magic ✨',
'Pondering desu~',
'Umm... one moment! ♪',
'Brain go brrr~',
'Assembling thoughts... ♪',
'Loading Miku-brain...',
'Thinking hard senpai~',
];
const loadingPhrase = kawaiiPhrases[Math.floor(Math.random() * kawaiiPhrases.length)];
const loadingEmbed = createStatusEmbed(loadingEmoji, loadingPhrase, 'Starting...');
const loadingMsg = await message.reply({ embeds: [loadingEmbed] });
// Check if provider supports streaming
const provider = state.provider!();
logInfo(
`[bot] Provider: ${provider.name()}, streaming supported: ${!!provider.requestLLMResponseStreaming}`
);
if (provider.requestLLMResponseStreaming) {
// Use streaming - accumulate all chunks, show only the delta (newest piece) in embed
let lastUpdateTime = Date.now();
const updateIntervalMs = 1500; // Update every ~1.5 seconds
let fullContent = '';
let previousContent = '';
let chunkCount = 0;
try {
const stream = provider.requestLLMResponseStreaming(
cleanHistoryList,
state.sysprompt!(),
state.llmconf!()
);
for await (const chunk of stream) {
chunkCount++;
// Accumulate all content for final response
// NOTE(review): this assigns rather than appends — the delta logic below
// implies chunk.content carries the full accumulated text so far; confirm
// against the StreamingChunk contract.
if (chunk.content) {
fullContent = chunk.content;
}
// Update embed periodically if we have new content
const now = Date.now();
if (fullContent && now - lastUpdateTime >= updateIntervalMs) {
// Get only the delta (new piece since last update)
const delta = fullContent.slice(previousContent.length);
if (delta) {
// Strip newlines and show delta in code block within embed
const singleLine = delta.replace(/\n/g, ' ');
const statusEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
`Generating response...\n\`\`\`${singleLine}\`\`\``
);
await loadingMsg.edit({ embeds: [statusEmbed] });
lastUpdateTime = now;
previousContent = fullContent;
}
}
}
logInfo(
`[bot] Streaming complete: ${chunkCount} chunks, content=${fullContent.length} chars`
);
// Extract final response by stripping <think>...</think> blocks
const finalResponse = fullContent.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
// Generation complete - update status and send final response
if (isGoodResponse(finalResponse)) {
// Success - delete loading embed and send final response as plaintext reply
await loadingMsg.delete();
await message.reply(finalResponse);
} else {
// Response exceeded max length - update embed with error message
const errorEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
'Oops! The voices in my head rambled on for too long... 😭\n(Reasoning trace exceeded max token budget)'
);
await loadingMsg.edit({ embeds: [errorEmbed] });
logWarn(`[bot] Burning bad response: "${finalResponse}"`);
}
} catch (streamErr) {
// Streaming failed mid-generation: surface the error in the embed.
logError(`[bot] Streaming error: ${streamErr}`);
const errorEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
`Oops! Something went wrong while I was thinking... 😭\n\`${streamErr}\``
);
await loadingMsg.edit({ embeds: [errorEmbed] });
}
} else {
// Fallback to non-streaming method
const response = await provider.requestLLMResponse(
cleanHistoryList,
state.sysprompt!(),
state.llmconf!()
);
if (isGoodResponse(response)) {
await loadingMsg.delete();
await message.reply(response);
} else {
// Response exceeded max length - update embed with error message
const errorEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
'Oops! The voices in my head rambled on for too long... 😭\n(Reasoning trace exceeded max token budget)'
);
await loadingMsg.edit({ embeds: [errorEmbed] });
logWarn(`[bot] Burning bad response: "${response}"`);
}
}
} catch (err) {
logError(`[bot] Error while generating LLM response: ${err}`);
}
}
/**
 * Fetch the message-of-the-day by scraping the page at MOTD_HREF and
 * selecting the element matched by MOTD_QUERY.
 *
 * @returns the element's text content, or null when the env vars are not
 *          configured, the page is unreachable, or the element is missing.
 *          (Previously, missing env vars made fetch()/querySelector() throw,
 *          and the catch path implicitly returned undefined.)
 */
async function fetchMotd(): Promise<string | null> {
    const href = process.env.MOTD_HREF;
    const query = process.env.MOTD_QUERY;
    if (!href || !query) {
        logWarn('[bot] MOTD_HREF/MOTD_QUERY not configured.');
        return null;
    }
    try {
        const res = await fetch(href);
        const xml = await res.text();
        const doc = new JSDOM(xml).window.document;
        const el = doc.querySelector(query);
        return el ? el.textContent : null;
    } catch (err) {
        logWarn('[bot] Failed to fetch MOTD; is the booru down?');
        return null;
    }
}
async function requestRVCResponse(src: Attachment): Promise<NodeFetchBlob> {
logInfo(`[bot] Downloading audio message ${src.url}`);
const srcres = await fetch(src.url);
const srcbuf = await srcres.arrayBuffer();
const tmpFile = tmp.fileSync();
const tmpFileName = tmpFile.name;
fs.writeFileSync(tmpFileName, Buffer.from(srcbuf));
logInfo(`[bot] Got audio file: ${srcbuf.byteLength} bytes`);
const queryParams = new URLSearchParams();
queryParams.append('token', process.env.LLM_TOKEN || '');
const fd = new FormData();
fd.append('file', fs.readFileSync(tmpFileName), 'voice-message.ogg');
const rvcEndpoint = `${process.env.RVC_HOST}/rvc?${queryParams.toString()}`;
logInfo(`[bot] Requesting RVC response for ${src.id}`);
const res = await fetch(rvcEndpoint, {
method: 'POST',
body: fd,
});
const resContents = await res.blob();
return resContents;
}
/**
 * Post the MOTD (with TTS audio when available) to MOTD_CHANNEL, then
 * reschedule itself 2-8 hours out. The first call (firstTime=true) only
 * schedules, so the bot does not post immediately at startup.
 */
async function scheduleRandomMessage(firstTime = false) {
    if (!firstTime) {
        if (!process.env.MOTD_CHANNEL) {
            return;
        }
        // channels.fetch() rejects on an unknown/inaccessible channel rather
        // than resolving null; without this try/catch the rejection escaped
        // the setTimeout callback as an unhandled promise rejection.
        let channel: TextChannel | null = null;
        try {
            channel = <TextChannel>await client.channels.fetch(process.env.MOTD_CHANNEL);
        } catch (err) {
            channel = null;
        }
        if (!channel) {
            logWarn(`[bot] Channel ${process.env.MOTD_CHANNEL} not found, disabling MOTD.`);
            return;
        }
        const randomMessage = await fetchMotd();
        if (randomMessage) {
            try {
                const audio = await requestTTSResponse(randomMessage);
                const audioBuf = await audio.arrayBuffer();
                const audioFile = new AttachmentBuilder(Buffer.from(audioBuf)).setName(
                    'mikuified.wav'
                );
                await channel.send({
                    content: randomMessage,
                    files: [audioFile],
                });
                logInfo(`[bot] Sent MOTD + TTS: ${randomMessage}`);
            } catch (err) {
                // TTS failure is non-fatal: fall back to a plain text MOTD.
                await channel.send(randomMessage);
                logWarn(`[bot] Could not fetch MOTD TTS: ${err}`);
                logInfo(`[bot] Send text MOTD: ${randomMessage}`);
            }
        } else {
            logWarn(`[bot] Could not fetch MOTD.`);
        }
    }
    // wait between 2-8 hours
    const timeoutMins = Math.random() * 360 + 120;
    const scheduledTime = new Date();
    scheduledTime.setMinutes(scheduledTime.getMinutes() + timeoutMins);
    logInfo(`[bot] Next MOTD: ${scheduledTime.toLocaleTimeString()}`);
    setTimeout(scheduleRandomMessage, timeoutMins * 60 * 1000);
}
/**
 * Convert a Date to an approximate Discord snowflake ID string.
 *
 * Snowflakes store milliseconds since the Discord epoch
 * (2015-01-01T00:00:00.000Z) in their upper bits, shifted left by 22;
 * the worker/process/increment bits are left at zero.
 */
function dateToSnowflake(date: Date): string {
    const DISCORD_EPOCH_MS = 1420070400000n;
    const millisSinceEpoch = BigInt(date.getTime()) - DISCORD_EPOCH_MS;
    return String(millisSinceEpoch << 22n);
}
/**
 * Once a day (every ~22-26 h), pick a random non-bot text message posted
 * around one year ago in THROWBACK_CHANNEL and reply to it with an LLM
 * response. The first call (firstTime=true) only schedules the next run.
 */
async function scheduleThrowback(firstTime = false) {
if (!firstTime) {
if (!process.env.THROWBACK_CHANNEL) {
logWarn('[bot] THROWBACK_CHANNEL not configured, disabling throwback.');
return;
}
// NOTE(review): channels.fetch() rejects on unknown channels rather than
// resolving null, so the falsy check below may never trigger — confirm.
const channel = <TextChannel>await client.channels.fetch(process.env.THROWBACK_CHANNEL);
if (!channel) {
logWarn(
`[bot] Channel ${process.env.THROWBACK_CHANNEL} not found, disabling throwback.`
);
return;
}
try {
// Calculate date from 1 year ago
const oneYearAgo = new Date();
oneYearAgo.setFullYear(oneYearAgo.getFullYear() - 1);
// Convert to approximate snowflake ID
const aroundSnowflake = dateToSnowflake(oneYearAgo);
logInfo(
`[bot] Fetching messages around ${oneYearAgo.toISOString()} (snowflake: ${aroundSnowflake})`
);
// Fetch messages around that time
const messages = await channel.messages.fetch({
around: aroundSnowflake,
limit: 50,
});
// Filter to only text messages from non-bots
const textMessages = messages.filter(
(m) =>
!m.author.bot &&
m.cleanContent.length > 0 &&
(m.type === MessageType.Default || m.type === MessageType.Reply)
);
if (textMessages.size === 0) {
logWarn('[bot] No messages found from 1 year ago, skipping throwback.');
} else {
// Pick a random message
const messagesArray = [...textMessages.values()];
const randomMsg = messagesArray[Math.floor(Math.random() * messagesArray.length)];
logInfo(
`[bot] Selected throwback message from ${randomMsg.author.username}: "${randomMsg.cleanContent}"`
);
// Generate LLM response using the standard system prompt
if ('sendTyping' in channel) {
await channel.sendTyping();
}
// The throwback message alone serves as the LLM context.
const llmResponse = await state.provider!().requestLLMResponse(
[randomMsg],
state.sysprompt!(),
state.llmconf!()
);
// Reply directly to the original message
await randomMsg.reply(llmResponse);
logInfo(`[bot] Sent throwback reply: ${llmResponse}`);
}
} catch (err) {
logError(`[bot] Error fetching throwback message: ${err}`);
}
}
// Schedule next throwback in ~24 hours (with some randomness: 22-26 hours)
const timeoutHours = 22 + Math.random() * 4;
const scheduledTime = new Date();
scheduledTime.setHours(scheduledTime.getHours() + timeoutHours);
logInfo(`[bot] Next throwback: ${scheduledTime.toLocaleString()}`);
setTimeout(scheduleThrowback, timeoutHours * 60 * 60 * 1000);
}
/**
 * Every day at 09:00 local time, count yesterday's messages per whitelisted
 * person across all guild text channels and announce whoever posted the
 * fewest ("the biggest loser"), with per-person streak tracking persisted to
 * a JSON file next to this module. The first call (firstTime=true) only
 * schedules the next run.
 */
async function scheduleBiggestLoser(firstTime = false) {
if (!firstTime) {
if (!process.env.LOSER_CHANNEL) {
logWarn('[bot] LOSER_CHANNEL not configured, disabling biggest loser announcement.');
return;
}
const channel = <TextChannel>await client.channels.fetch(process.env.LOSER_CHANNEL);
if (channel) {
try {
// Yesterday's window: [00:00 yesterday, 00:00 today), local time.
const yesterdayStart = new Date();
yesterdayStart.setDate(yesterdayStart.getDate() - 1);
yesterdayStart.setHours(0, 0, 0, 0);
const yesterdayEnd = new Date();
yesterdayEnd.setHours(0, 0, 0, 0);
// Snowflake bounds let us page through messages by ID.
const startId = dateToSnowflake(yesterdayStart);
const endId = dateToSnowflake(yesterdayEnd);
// Only whitelisted people participate; each starts at 0 so posting
// nothing at all still counts.
const realNameToCount = new Map<string, number>();
for (const realName of new Set(Object.values(REAL_NAMES))) {
if (LOSER_WHITELIST.includes(realName as string)) {
realNameToCount.set(realName as string, 0);
}
}
const guild = await client.guilds.fetch(process.env.GUILD as string);
if (guild) {
const channels = await guild.channels.fetch();
const textChannels = channels.filter((c: any) => c && c.isTextBased());
// Page through each channel in batches of 100 messages, counting those
// that fall inside yesterday's window.
for (const [_, textChannel] of textChannels) {
let lastId = startId;
while (true) {
try {
const messages = await (textChannel as any).messages.fetch({
after: lastId,
limit: 100,
});
if (messages.size === 0) break;
let maxId = lastId;
for (const [msgId, msg] of messages) {
// Track the highest ID seen to advance the pagination cursor.
if (BigInt(msgId) > BigInt(maxId)) maxId = msgId;
// Skip messages at/after today's midnight boundary.
if (BigInt(msgId) >= BigInt(endId)) continue;
if (
!msg.author.bot &&
(REAL_NAMES as any)[msg.author.username]
) {
const realName = (REAL_NAMES as any)[msg.author.username];
if (realNameToCount.has(realName)) {
realNameToCount.set(
realName,
realNameToCount.get(realName)! + 1
);
}
}
}
lastId = maxId;
// Stop once we pass the window or the channel has no more pages.
if (BigInt(lastId) >= BigInt(endId) || messages.size < 100) break;
} catch (e) {
logWarn(`[bot] Error fetching from channel: ${e}`);
break;
}
}
}
}
// Find the minimum message count; ties produce multiple losers.
let minCount = Infinity;
let biggestLosers: string[] = [];
for (const [realName, count] of realNameToCount.entries()) {
if (count < minCount) {
minCount = count;
biggestLosers = [realName];
} else if (count === minCount) {
biggestLosers.push(realName);
}
}
if (biggestLosers.length > 0) {
biggestLosers.sort();
// Track individual streaks per person
const streakFile = path.join(__dirname, 'biggest_loser_streaks.json');
let streaks: Record<string, number> = {};
if (fs.existsSync(streakFile)) {
try {
streaks = JSON.parse(fs.readFileSync(streakFile, 'utf8'));
} catch (e) {
logWarn(`[bot] Failed to read streak data: ${e}`);
streaks = {};
}
}
// Update streaks: continue if this person was in yesterday's losers, otherwise reset to 1
// (the file only ever contains the previous day's losers, so anyone
// absent from it restarts at 1).
const newStreaks: Record<string, number> = {};
for (const name of biggestLosers) {
newStreaks[name] = (streaks[name] || 0) + 1;
}
fs.writeFileSync(streakFile, JSON.stringify(newStreaks));
// Humanize the name list: "A", "A and B", or "A, B, and C".
const firstNames = biggestLosers.map((n) => n.split(' ')[0]);
let joinedNames = firstNames[0];
if (firstNames.length === 2) {
joinedNames = `${firstNames[0]} and ${firstNames[1]}`;
} else if (firstNames.length > 2) {
joinedNames = `${firstNames.slice(0, -1).join(', ')}, and ${firstNames[firstNames.length - 1]}`;
}
// Build message with individual streak info
const isPlural = biggestLosers.length > 1;
const loserWord = isPlural ? 'losers' : 'loser';
const isAre = isPlural ? 'are' : 'is';
let declaration: string;
if (isPlural) {
// For multiple losers, list each with their streak
const streakParts = biggestLosers.map((name, idx) => {
const firstName = firstNames[idx];
const dayWord = newStreaks[name] === 1 ? 'day' : 'days';
return `${firstName} (${newStreaks[name]} ${dayWord} in a row)`;
});
let streakDetails = streakParts[0];
if (streakParts.length === 2) {
streakDetails = `${streakParts[0]} and ${streakParts[1]}`;
} else if (streakParts.length > 2) {
streakDetails = `${streakParts.slice(0, -1).join(', ')}, and ${streakParts[streakParts.length - 1]}`;
}
declaration = `Yesterday's biggest ${loserWord} ${isAre} ${joinedNames} with only ${minCount} messages! Streaks: ${streakDetails}.`;
} else {
const dayWord = newStreaks[biggestLosers[0]] === 1 ? 'day' : 'days';
declaration = `Yesterday's biggest ${loserWord} ${isAre} ${joinedNames} with only ${minCount} messages! They have been the biggest ${loserWord} for ${newStreaks[biggestLosers[0]]} ${dayWord} in a row.`;
}
try {
// Resolve Discord mention tags for the losers so they get pinged.
let pingTags: string[] = [];
if (guild) {
// Build a reverse map from real name to Discord user IDs
// NOTE(review): despite the name, the map values are usernames, not
// user IDs; IDs are resolved from the member list below.
const realNameToUserIds = new Map<string, string[]>();
for (const [username, realName] of Object.entries(REAL_NAMES)) {
if (!realNameToUserIds.has(realName)) {
realNameToUserIds.set(realName, []);
}
realNameToUserIds.get(realName)!.push(username);
}
// Fetch members for the usernames we need to ping
const usernamesToCheck = new Set<string>();
for (const realName of biggestLosers) {
const usernames = realNameToUserIds.get(realName);
if (usernames) {
usernames.forEach((u) => usernamesToCheck.add(u));
}
}
// Try to fetch members (with a shorter timeout to avoid hanging)
const members = await guild.members.fetch({ time: 10000 });
for (const [_, member] of members) {
const username = member.user.username;
if (usernamesToCheck.has(username)) {
const tag = `<@${member.user.id}>`;
if (!pingTags.includes(tag)) {
pingTags.push(tag);
}
}
}
}
if (pingTags.length > 0) {
declaration += `\n${pingTags.join(' ')}`;
}
} catch (e) {
logWarn(`[bot] Error fetching members for ping: ${e}`);
}
logInfo(`[bot] Declaring biggest loser: ${declaration}`);
await channel.send(declaration);
await channel.send(
'https://tenor.com/view/klajumas-spit-skreplis-klajumas-skreplis-gif-13538828554330887910'
);
}
} catch (err) {
logError(`[bot] Error finding biggest loser: ${err}`);
}
}
}
// Schedule the next announcement for the upcoming 09:00 local time.
const now = new Date();
const next9AM = new Date();
next9AM.setHours(9, 0, 0, 0);
if (now.getTime() >= next9AM.getTime()) {
next9AM.setDate(next9AM.getDate() + 1);
}
const timeout = next9AM.getTime() - now.getTime();
logInfo(`[bot] Next biggest loser announcement: ${next9AM.toLocaleString()}`);
setTimeout(scheduleBiggestLoser, timeout);
}
// NOTE(review): this InteractionCreate listener is a no-op duplicate of the
// dispatcher registered further below — it filters non-slash interactions
// and then does nothing. Consider removing it.
client.on(Events.InteractionCreate, async (interaction) => {
if (!interaction.isChatInputCommand()) return;
});
// Wire up message and reaction events (add and remove share one handler).
client.on(Events.MessageCreate, onNewMessage);
client.on(Events.MessageReactionAdd, onMessageReactionChanged);
client.on(Events.MessageReactionRemove, onMessageReactionChanged);
// Dispatch slash-command interactions to their registered handlers.
client.on(Events.InteractionCreate, async (interaction) => {
    if (!interaction.isChatInputCommand()) return;
    const commandClient: CommandClient = interaction.client;
    const command = commandClient.commands?.get(interaction.commandName);
    if (!command) {
        logError(`[bot] No command matching ${interaction.commandName} was found.`);
        return;
    }
    try {
        await command.execute(interaction);
    } catch (error) {
        logError(error);
        // Notify the invoker privately; use followUp if a reply already went out.
        const failureNotice = {
            content: 'There was an error while executing this command!',
            ephemeral: true,
        };
        if (interaction.replied || interaction.deferred) {
            await interaction.followUp(failureNotice);
        } else {
            await interaction.reply(failureNotice);
        }
    }
});
// startup
(async () => {
// Remove any leftover temp files when the process exits.
tmp.setGracefulCleanup();
logInfo('[db] Opening...');
await openDb();
logInfo('[db] Migrating...');
await db.migrate();
logInfo('[db] Ready.');
// Dynamically load slash-command modules from commands/<folder>/*.js.
logInfo('[bot] Loading commands...');
const foldersPath = path.join(__dirname, 'commands');
const commandFolders = fs.readdirSync(foldersPath, { withFileTypes: true });
for (const folder of commandFolders) {
if (!folder.isDirectory()) {
continue;
}
const commandsPath = path.join(foldersPath, folder.name);
const commandFiles = fs.readdirSync(commandsPath).filter((file) => file.endsWith('.js'));
for (const file of commandFiles) {
const filePath = path.join(commandsPath, file);
const command = require(filePath);
client.commands?.set(command.data.name, command);
// Commands may expose a state getter which the rest of the bot reads via
// the module-level `state` object (llmconf/provider/sysprompt).
// NOTE(review): indexing State by an arbitrary command name is unchecked —
// confirm only commands named llmconf/provider/sysprompt export state.
if (command.state) {
state[command.data.name] = command.state;
}
logInfo(`[bot] Found command: /${command.data.name}`);
}
}
logInfo('[bot] Logging in...');
await client.login(process.env.TOKEN);
// Optional scheduled features, gated by env flags; firstTime=true so each
// only schedules its next run without posting immediately.
if (process.env.ENABLE_MOTD) {
await scheduleRandomMessage(true);
}
if (process.env.ENABLE_THROWBACK) {
await scheduleThrowback(true);
}
if (process.env.ENABLE_LOSER) {
await scheduleBiggestLoser(true);
}
await sync(client.guilds);
})();