// FemScoreboard/discord/bot.ts
/**
* bot.ts
* Scans the chat for reactions and updates the leaderboard database.
*/
import {
    Client,
    Collection,
    Events,
    GatewayIntentBits,
    Message,
    MessageReaction,
    PartialMessageReaction,
    Partials,
    TextChannel,
    User
} from 'discord.js';
import { ChatMessage, llamacpp, streamText } from 'modelfusion';
import fetch from 'node-fetch';
import { JSDOM } from 'jsdom';

import { logError, logInfo } from '../logging';
import {
    db,
    openDb,
    reactionEmojis,
    recordReaction,
    sync
} from './util';
2023-10-07 22:46:02 -07:00
const client = new Client({
intents: [GatewayIntentBits.Guilds, GatewayIntentBits.GuildMessages, GatewayIntentBits.GuildMessageReactions],
partials: [Partials.Message, Partials.Channel, Partials.Reaction],
});
2024-02-06 16:45:55 -08:00
const llamaCppServer = llamacpp.Api({
baseUrl: {
host: "localhost",
port: process.env.LLAMACPP_PORT,
}
});
2023-10-07 22:46:02 -07:00
client.once(Events.ClientReady, async () => {
2023-10-08 19:10:47 -07:00
logInfo('[bot] Ready.');
2023-10-07 22:46:02 -07:00
for (let i = 0; i < reactionEmojis.length; ++i)
2023-10-08 19:10:47 -07:00
logInfo(`[bot] config: reaction_${i + 1} = ${reactionEmojis[i]}`);
2023-10-07 22:46:02 -07:00
});
2023-10-08 19:10:47 -07:00
2023-10-07 22:46:02 -07:00
async function onMessageReactionChanged(reaction: MessageReaction | PartialMessageReaction, user: User)
{
// When a reaction is received, check if the structure is partial
if (reaction.partial) {
// If the message this reaction belongs to was removed, the fetching might result in an API error which should be handled
try {
await reaction.fetch();
} catch (error) {
2023-10-08 19:10:47 -07:00
logError('[bot] Something went wrong when fetching the reaction:', error);
2023-10-07 22:46:02 -07:00
// Return as `reaction.message.author` may be undefined/null
return;
}
}
if (reaction.message.partial) {
// If the message this reaction belongs to was removed, the fetching might result in an API error which should be handled
try {
await reaction.message.fetch();
} catch (error) {
2023-10-08 19:10:47 -07:00
logError('[bot] Something went wrong when fetching the message:', error);
2023-10-07 22:46:02 -07:00
// Return as `reaction.message.author` may be undefined/null
return;
}
}
// Now the message has been cached and is fully available
2023-10-08 19:10:47 -07:00
logInfo(`[bot] ${reaction.message.author.id}'s message reaction count changed: ${reaction.emoji.name}x${reaction.count}`);
2023-10-07 22:46:02 -07:00
await recordReaction(<MessageReaction> reaction);
}
2023-10-08 18:59:04 -07:00
async function fetchMotd()
{
const res = await fetch(process.env.MOTD_HREF);
const xml = await res.text();
2024-02-06 15:23:26 -08:00
const parser = new JSDOM(xml);
const doc = parser.window.document;
2023-10-08 18:59:04 -07:00
return doc.querySelector(process.env.MOTD_QUERY).textContent;
}
async function scheduleRandomMessage(firstTime = false)
{
if (!firstTime) {
const channel = <TextChannel> await client.channels.fetch(process.env.MOTD_CHANNEL);
if (!channel) {
console.warn(`[bot] Channel ${process.env.MOTD_CHANNEL} not found, disabling MOTD.`);
return;
}
2023-10-08 18:59:04 -07:00
const randomMessage = await fetchMotd();
await channel.send(randomMessage);
2023-10-08 19:10:47 -07:00
logInfo(`[bot] Sent MOTD: ${randomMessage}`);
2023-10-08 18:59:04 -07:00
}
// wait between 2-8 hours
const timeoutMins = Math.random() * 360 + 120;
const scheduledTime = new Date();
scheduledTime.setMinutes(scheduledTime.getMinutes() + timeoutMins);
2023-10-08 19:15:00 -07:00
logInfo(`[bot] Next MOTD: ${scheduledTime.toLocaleTimeString()}`);
2023-10-08 18:59:04 -07:00
setTimeout(scheduleRandomMessage, timeoutMins * 60 * 1000);
}
2024-02-06 16:45:55 -08:00
async function llamaChat(history: Collection<string, Message>)
{
const textStream = await streamText({
model: llamacpp
.CompletionTextGenerator({
promptTemplate: llamacpp.prompt.Llama2,
maxGenerationTokens: 400,
temperature: 0.7,
api: llamaCppServer
})
.withChatPrompt(),
prompt: {
system: "You are Hatsune Miku, the famous 16-year-old Japanese virtual singer from Crypton Future Media. You have a penchant for politically incorrect humor, and are making banter with your like-minded friends.",
messages: history.map<ChatMessage>(msg => ({
"role": msg.author.id === client.user.id ? "assistant" : "user",
"content": msg.content
}))
}
});
let outMsg: Message;
try {
for await (const textPart of textStream) {
if (!outMsg) {
outMsg = await history.first().channel.send(textPart);
} else {
await outMsg.edit(outMsg.content + textPart);
}
}
} catch (err) {
console.error(err);
await history.first().channel.send(err.toString());
}
}
client.on(Events.InteractionCreate, async interaction => {
if (!interaction.isChatInputCommand()) return;
console.log(interaction);
const history = await interaction.channel.messages.fetch({ limit: 5 });
await llamaChat(history);
});
2023-10-07 22:46:02 -07:00
client.on(Events.MessageReactionAdd, onMessageReactionChanged);
client.on(Events.MessageReactionRemove, onMessageReactionChanged);
async function startup() {
2023-10-08 19:10:47 -07:00
logInfo("[db] Opening...");
2023-10-07 22:46:02 -07:00
await openDb();
2023-10-08 19:10:47 -07:00
logInfo("[db] Migrating...");
2023-10-07 22:46:02 -07:00
await db.migrate();
2023-10-08 19:10:47 -07:00
logInfo("[db] Ready.");
logInfo("[bot] Logging in...");
2023-10-07 22:46:02 -07:00
await client.login(process.env.TOKEN);
await sync(client.guilds);
2023-10-08 18:59:04 -07:00
if (process.env.ENABLE_MOTD) {
await scheduleRandomMessage(true);
}
2023-10-07 22:46:02 -07:00
}
startup();