/**
 * bot.ts
 * Scans the chat for reactions and updates the leaderboard database.
 * Also posts a scheduled MOTD and streams LLM chat replies from a local llama.cpp server.
 */

import {
    Client,
    Collection,
    Events,
    GatewayIntentBits,
    Message,
    MessageReaction,
    PartialMessageReaction,
    Partials,
    TextChannel,
    User
} from 'discord.js';
import { ChatMessage, llamacpp, streamText } from 'modelfusion';
import { logError, logInfo } from '../logging';
import {
    db,
    openDb,
    reactionEmojis,
    recordReaction,
    sync
} from './util';
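
// Environment variables read by this file (listed here for reference; the names
// come from the process.env lookups below, the values are deployment-specific):
//   TOKEN          - Discord bot token passed to client.login()
//   LLAMACPP_PORT  - port of the local llama.cpp server
//   MOTD_HREF      - URL of the page scraped for the message of the day
//   MOTD_QUERY     - CSS selector locating the MOTD text on that page
//   MOTD_CHANNEL   - ID of the channel that receives the MOTD
//   ENABLE_MOTD    - set to any non-empty value to enable the MOTD scheduler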
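
// The gateway intents cover guild messages and reactions; message, channel and
// reaction partials are enabled so reaction events still arrive for messages
// that are not cached (those are fetched on demand in onMessageReactionChanged).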
const client = new Client({
    intents: [GatewayIntentBits.Guilds, GatewayIntentBits.GuildMessages, GatewayIntentBits.GuildMessageReactions],
    partials: [Partials.Message, Partials.Channel, Partials.Reaction],
});

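// API handle for the llama.cpp server, assumed to be listening on localhost at LLAMACPP_PORT.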
const llamaCppServer = llamacpp.Api({
    baseUrl: {
        host: "localhost",
        port: process.env.LLAMACPP_PORT,
    }
});

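// Log the configured reaction emojis once the gateway connection is ready.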
client.once(Events.ClientReady, async () => {
    logInfo('[bot] Ready.');
    for (let i = 0; i < reactionEmojis.length; ++i)
        logInfo(`[bot] config: reaction_${i + 1} = ${reactionEmojis[i]}`);
});

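/**
 * Handles both MessageReactionAdd and MessageReactionRemove. Fetches partial
 * reactions/messages if needed, then records the updated reaction count.
 */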
async function onMessageReactionChanged(reaction: MessageReaction | PartialMessageReaction, user: User)
{
    // When a reaction is received, check if the structure is partial
    if (reaction.partial) {
        // If the message this reaction belongs to was removed, the fetching might result in an API error which should be handled
        try {
            await reaction.fetch();
        } catch (error) {
            logError('[bot] Something went wrong when fetching the reaction:', error);
            // Return as `reaction.message.author` may be undefined/null
            return;
        }
    }
    if (reaction.message.partial) {
        // If the message this reaction belongs to was removed, the fetching might result in an API error which should be handled
        try {
            await reaction.message.fetch();
        } catch (error) {
            logError('[bot] Something went wrong when fetching the message:', error);
            // Return as `reaction.message.author` may be undefined/null
            return;
        }
    }

    // Now the message has been cached and is fully available
    logInfo(`[bot] ${reaction.message.author.id}'s message reaction count changed: ${reaction.emoji.name}x${reaction.count}`);
    await recordReaction(<MessageReaction> reaction);
}

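/**
 * Fetches the message of the day by scraping MOTD_HREF and returning the text
 * content of the first element matching the MOTD_QUERY selector.
 * Relies on a DOMParser implementation being available in the runtime.
 */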
async function fetchMotd()
{
    const res = await fetch(process.env.MOTD_HREF);
    const xml = await res.text();
    const parser = new DOMParser();
    const doc = parser.parseFromString(xml, 'text/html');
    return doc.querySelector(process.env.MOTD_QUERY).textContent;
}

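/**
 * Posts the MOTD to the configured channel (skipped on the very first call)
 * and re-schedules itself to run again after a random 2-8 hour delay.
 */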
async function scheduleRandomMessage(firstTime = false)
{
    if (!firstTime) {
        const channel = <TextChannel> await client.channels.fetch(process.env.MOTD_CHANNEL);
        if (!channel) {
            console.warn(`[bot] Channel ${process.env.MOTD_CHANNEL} not found, disabling MOTD.`);
            return;
        }
        const randomMessage = await fetchMotd();
        await channel.send(randomMessage);
        logInfo(`[bot] Sent MOTD: ${randomMessage}`);
    }
    // wait between 2-8 hours
    const timeoutMins = Math.random() * 360 + 120;
    const scheduledTime = new Date();
    scheduledTime.setMinutes(scheduledTime.getMinutes() + timeoutMins);
    logInfo(`[bot] Next MOTD: ${scheduledTime.toLocaleTimeString()}`);
    setTimeout(scheduleRandomMessage, timeoutMins * 60 * 1000);
}

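/**
 * Streams a chat completion from the llama.cpp server, prompted with the given
 * channel history, and mirrors the output into a Discord message that is
 * edited as new text arrives.
 */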
async function llamaChat(history: Collection<string, Message>)
{
    const textStream = await streamText({
        model: llamacpp
            .CompletionTextGenerator({
                promptTemplate: llamacpp.prompt.Llama2,
                maxGenerationTokens: 400,
                temperature: 0.7,
                api: llamaCppServer
            })
            .withChatPrompt(),
        prompt: {
            system: "You are Hatsune Miku, the famous 16-year-old Japanese virtual singer from Crypton Future Media. You have a penchant for politically incorrect humor, and are making banter with your like-minded friends.",
            messages: history.map<ChatMessage>(msg => ({
                "role": msg.author.id === client.user.id ? "assistant" : "user",
                "content": msg.content
            }))
        }
    });

    // Send the first chunk as a new message, then append subsequent chunks by editing it.
    let outMsg: Message | undefined;
    try {
        for await (const textPart of textStream) {
            if (!outMsg) {
                outMsg = await history.first().channel.send(textPart);
            } else {
                await outMsg.edit(outMsg.content + textPart);
            }
        }
    } catch (err) {
        console.error(err);
        await history.first().channel.send(err.toString());
    }
}

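// Any slash command triggers an LLM reply generated from the last 5 messages in the channel.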
client.on(Events.InteractionCreate, async interaction => {
    if (!interaction.isChatInputCommand()) return;
    console.log(interaction);
    const history = await interaction.channel.messages.fetch({ limit: 5 });
    await llamaChat(history);
});

client.on(Events.MessageReactionAdd, onMessageReactionChanged);
client.on(Events.MessageReactionRemove, onMessageReactionChanged);

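/**
 * Opens and migrates the database, logs in to Discord, syncs guild state, and
 * (if ENABLE_MOTD is set) starts the MOTD scheduler.
 */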
async function startup() {
    logInfo("[db] Opening...");
    await openDb();
    logInfo("[db] Migrating...");
    await db.migrate();
    logInfo("[db] Ready.");
    logInfo("[bot] Logging in...");
    await client.login(process.env.TOKEN);
    await sync(client.guilds);
    if (process.env.ENABLE_MOTD) {
        await scheduleRandomMessage(true);
    }
}

startup();