FemScoreboard/discord/commands/config/config.ts

import { ChatInputCommandInteraction, SlashCommandBuilder } from 'discord.js';
import { LLMConfig } from '../types';
import 'dotenv/config';

// Live inference settings; mutated in place by /llmconf and exposed via state().
const config: LLMConfig = {
    max_new_tokens: 1500,
    min_new_tokens: 1,
    temperature: 0.8,
    top_p: 0.6,
    msg_context: 8,
    frequency_penalty: 0.0,
    presence_penalty: 0.0,
    streaming: false,
};

async function configCommand(interaction: ChatInputCommandInteraction) {
    // Only the configured admin may change model settings.
    if (interaction.user.id !== process.env.ADMIN) {
        await interaction.reply('You are not authorized to change model settings');
        return;
    }

    // Each option falls back to the current value when it is not supplied.
    config.max_new_tokens =
        interaction.options.getInteger('max_new_tokens') ?? config.max_new_tokens;
    config.min_new_tokens =
        interaction.options.getInteger('min_new_tokens') ?? config.min_new_tokens;
    config.msg_context = interaction.options.getInteger('msg_context') ?? config.msg_context;
    config.temperature = interaction.options.getNumber('temperature') ?? config.temperature;
    config.top_p = interaction.options.getNumber('top_p') ?? config.top_p;
    config.frequency_penalty =
        interaction.options.getNumber('frequency_penalty') ?? config.frequency_penalty;
    config.presence_penalty =
        interaction.options.getNumber('presence_penalty') ?? config.presence_penalty;
    config.streaming = interaction.options.getBoolean('streaming') ?? config.streaming;

    // Echo the resulting settings back as a code block.
    await interaction.reply(`
\`\`\`
max_new_tokens = ${config.max_new_tokens}
min_new_tokens = ${config.min_new_tokens}
msg_context = ${config.msg_context}
temperature = ${config.temperature}
top_p = ${config.top_p}
frequency_penalty = ${config.frequency_penalty}
presence_penalty = ${config.presence_penalty}
streaming = ${config.streaming}
\`\`\`
`);
}

export = {
    data: new SlashCommandBuilder()
        .setName('llmconf')
        .setDescription('Change model inference settings')
        .addNumberOption((opt) =>
            opt
                .setName('temperature')
                .setDescription('Temperature; not recommended w/ top_p (default: 0.8)')
        )
        .addNumberOption((opt) =>
            opt
                .setName('top_p')
                .setDescription(
                    'Cumulative prob. of min. token set to sample from; not recommended w/ temperature (default: 0.6)'
                )
        )
        .addNumberOption((opt) =>
            opt
                .setName('frequency_penalty')
                .setDescription(
                    '[unused] Penalize tokens for reappearing multiple times; ranges from -2 to 2 (default: 0.0)'
                )
        )
        .addNumberOption((opt) =>
            opt
                .setName('presence_penalty')
                .setDescription(
                    '[unused] Penalize a token for reappearing; ranges from -2 to 2 (default: 0.0)'
                )
        )
        .addIntegerOption((opt) =>
            opt.setName('max_new_tokens').setDescription('Max. new tokens (default: 1500)')
        )
        .addIntegerOption((opt) =>
            opt.setName('min_new_tokens').setDescription('Min. new tokens (default: 1)')
        )
        .addIntegerOption((opt) =>
            opt.setName('msg_context').setDescription('Num. messages in context (default: 8)')
        )
        .addBooleanOption((opt) =>
            opt
                .setName('streaming')
                .setDescription('Enable/disable streaming responses (default: false)')
        ),
    execute: configCommand,
    state: () => config,
};
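
The LLMConfig type is imported from ../types, which is not shown here. Judging purely from the fields used in config.ts, it presumably looks roughly like the sketch below; this is an inference, not the actual file.

// Hypothetical sketch of the shape of '../types' — inferred from usage above.
export interface LLMConfig {
    max_new_tokens: number;
    min_new_tokens: number;
    temperature: number;
    top_p: number;
    msg_context: number;
    frequency_penalty: number;
    presence_penalty: number;
    streaming: boolean;
}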
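
The module exports three members: data (the slash-command definition), execute (the interaction handler), and state (an accessor other modules can call to read the live settings). A minimal sketch of how a bot entry point might register and dispatch the command, assuming the standard discord.js v14 REST deployment flow and env vars DISCORD_TOKEN, CLIENT_ID, and GUILD_ID (the import path is illustrative):

import { Client, Events, GatewayIntentBits, REST, Routes } from 'discord.js';
import llmconf = require('./commands/config/config');
import 'dotenv/config';

const client = new Client({ intents: [GatewayIntentBits.Guilds] });

// Push the /llmconf definition to a single guild; global registration would use
// Routes.applicationCommands(clientId) instead.
async function deploy() {
    const rest = new REST().setToken(process.env.DISCORD_TOKEN!);
    await rest.put(
        Routes.applicationGuildCommands(process.env.CLIENT_ID!, process.env.GUILD_ID!),
        { body: [llmconf.data.toJSON()] }
    );
}

client.on(Events.InteractionCreate, async (interaction) => {
    if (interaction.isChatInputCommand() && interaction.commandName === 'llmconf') {
        await llmconf.execute(interaction);
    }
});

deploy().then(() => client.login(process.env.DISCORD_TOKEN));

Because the command mutates config in place, whatever module actually calls the LLM can read the current settings at request time via llmconf.state().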