From 4b6f9fc468749830b31df615c8b44bd7d3188b66 Mon Sep 17 00:00:00 2001
From: James Shiffer
Date: Wed, 20 Nov 2024 09:16:56 +0000
Subject: [PATCH] Include message reply context in prompt

---
 discord/bot.ts | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/discord/bot.ts b/discord/bot.ts
index 70f8f40..1a981f6 100644
--- a/discord/bot.ts
+++ b/discord/bot.ts
@@ -214,23 +214,45 @@ async function requestLLMResponse(messages)
         queryParams.append(field, config["llmconf"].llmSettings[field]);
     }
     const llmEndpoint = `${process.env.LLM_HOST}/?${queryParams.toString()}`;
-    const messageList = messages.map((m: Message) => {
+    let messageList = await Promise.all(
+        messages.map(async (m: Message) => {
         let role = 'user';
         if (m.author.id === process.env.CLIENT) {
             role = 'assistant';
         } else if (m.author.bot) {
             return null;
-        } else if (KNOWN_USERNAMES.includes(m.author.username)) {
-            role = m.author.username;
+        /* } else if (KNOWN_USERNAMES.includes(m.author.username)) {
+            role = m.author.username; */
         }
-        return { role, content: m.cleanContent };
-    });
+
+        // fetch replied-to message, if there is one, and prompt it as such
+        let cleanContent = m.cleanContent;
+        if (m.type == MessageType.Reply && m.reference) {
+            // what about deeply nested replies? could possibly be recursive?
+            const repliedToMsg = await m.fetchReference();
+            if (repliedToMsg) {
+                const repliedToMsgLines = repliedToMsg.cleanContent.split('\n');
+                cleanContent = `> ${repliedToMsgLines.join('\n> ')}\n${cleanContent}`;
+            }
+        }
+
+        return { role, content: cleanContent };
+        })
+    );
+    messageList = messageList.filter(x => !!x);
+
+    // at the beginning, inject the system prompt
+    // at the end, start our text generation as a reply to the most recent msg from history
+    const replyContext = `> ${messageList[messageList.length - 1].content.split('\n').join('\n> ')}\n`;
     const reqBody = [
         {
             "role": "system",
             "content": config["llmconf"].sys_prompt
         },
-        ...messageList.filter(x => x)
+        ...messageList,
+        {
+            "role": "assistant",
+            "content": replyContext
+        }
     ];
     logInfo("[bot] Requesting LLM response with message list: " + reqBody.map(m => m.content));
     const res = await fetch(llmEndpoint, {
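--
Editor's note: a sketch of the request body this patch builds, assuming a
history where a user replied "thanks!" to the bot's earlier two-line message
"hello\nworld". The server behind LLM_HOST and its expected JSON shape are
not shown in this patch, so the exact wire format here is an assumption;
only the quoting and turn layout follow from the diff above.

const exampleReqBody = [
    // system prompt injected at the beginning
    { role: "system", content: "<sys_prompt>" },
    // user turn, with the replied-to message quoted above the reply text
    { role: "user", content: "> hello\n> world\nthanks!" },
    // partial assistant turn: replyContext re-quotes the most recent
    // message (nesting its existing "> " prefixes), so the completion
    // endpoint continues the text as a reply to it
    { role: "assistant", content: "> > hello\n> > world\n> thanks!\n" }
];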