Change chat cmd max tokens, replies

James Shiffer 2024-02-06 18:50:55 -08:00
parent 19346fb2c3
commit 8346f52f23
2 changed files with 12 additions and 17 deletions

@@ -3,6 +3,7 @@ REACTIONS="💀,💯,😭"
 CLIENT="123456789012345678"
 GUILD="123456789012345678"
+LLAMACPP_HOST=127.0.0.1
 LLAMACPP_PORT=9999
 ENABLE_MOTD=1
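
The new LLAMACPP_HOST entry replaces the host that the chat command previously hardcoded as "localhost" (see the second file below). As an illustrative sketch only, not part of this commit: assuming the bot reads these variables straight from process.env, a small startup guard could fall back to the defaults shown in this example file:

    // Hypothetical startup guard (not in this commit); the fallback values
    // simply mirror the example env file above.
    const host: string = process.env.LLAMACPP_HOST ?? '127.0.0.1';
    const port: number = Number(process.env.LLAMACPP_PORT ?? '9999');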

@@ -1,17 +1,13 @@
 import {
     ChatInputCommandInteraction,
-    Interaction,
-    InteractionResponse,
-    Message,
-    MessageComponentInteraction,
     SlashCommandBuilder
 } from 'discord.js';
-import { ChatMessage, llamacpp, streamText } from 'modelfusion';
+import { llamacpp, streamText } from 'modelfusion';
 import { logInfo, logError } from '../../../logging';
 
 const llamaCppServer = llamacpp.Api({
     baseUrl: {
-        host: "localhost",
+        host: process.env.LLAMACPP_HOST,
         port: process.env.LLAMACPP_PORT,
     }
 });
@@ -25,34 +21,32 @@ async function llamaChat(interaction: ChatInputCommandInteraction)
         model: llamacpp
             .CompletionTextGenerator({
                 promptTemplate: llamacpp.prompt.Llama2,
-                maxGenerationTokens: 400,
+                maxGenerationTokens: 16,
                 temperature: 0.7,
                 api: llamaCppServer
             })
             .withChatPrompt(),
         prompt: {
-            system: "You are Hatsune Miku, the famous 16-year-old Japanese virtual singer from Crypton Future Media. You have a penchant for politically incorrect humor, and are making banter with your like-minded friends.",
+            system: 'You are Hatsune Miku, the famous 16-year-old Japanese virtual singer from Crypton Future Media. You have a penchant for politically incorrect humor, and are making banter with your like-minded friends.',
             messages: [{
-                "role": "user",
-                "content": interaction.options.getString('prompt')
+                'role': 'user',
+                'content': interaction.options.getString('prompt')
             }]
         }
     });
 
-    let outMsg: InteractionResponse;
     let allText = '';
+    await interaction.deferReply();
+
     try {
         for await (const textPart of textStream) {
-            logInfo(`[chat] Added to LLaMA response: ${textPart}`);
             allText += textPart;
-            if (!outMsg) {
-                outMsg = await interaction.reply(allText);
-            } else {
-                await outMsg.edit(allText);
-            }
+            await interaction.editReply(allText);
         }
+        logInfo(`[chat] Final LLaMA response: ${allText}`);
     } catch (err) {
         logError(err);
-        await interaction.reply(err.toString());
+        await interaction.editReply(err.toString());
     }
 }
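
The reply handling above also changes shape: instead of calling interaction.reply() once and then editing the returned InteractionResponse, the command now defers first and edits the single deferred reply as tokens stream in. In discord.js, deferReply() acknowledges the slash command within Discord's roughly three-second acknowledgement window, and editReply() can then be called repeatedly, which suits incremental output; the drop of maxGenerationTokens from 400 to 16 presumably keeps each streamed reply short. A minimal sketch of the pattern in isolation, with the stream parameter standing in for the modelfusion text stream:

    import { ChatInputCommandInteraction } from 'discord.js';

    // Sketch of the defer-then-edit streaming pattern this commit adopts.
    // streamToReply is a hypothetical helper, not code from the repo.
    async function streamToReply(
        interaction: ChatInputCommandInteraction,
        stream: AsyncIterable<string>   // stand-in for the streamText() result
    ): Promise<string> {
        await interaction.deferReply();        // acknowledge before Discord's timeout
        let text = '';
        for await (const part of stream) {
            text += part;
            await interaction.editReply(text); // keep editing the one deferred reply
        }
        return text;
    }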