Mirror of https://git.femboyfinancial.jp/james/FemScoreboard.git, synced 2024-11-21 10:12:02 -08:00
Include message reply context in prompt
parent c0d48a92dc
commit 4b6f9fc468
@@ -214,23 +214,45 @@ async function requestLLMResponse(messages)
         queryParams.append(field, config["llmconf"].llmSettings[field]);
     }
     const llmEndpoint = `${process.env.LLM_HOST}/?${queryParams.toString()}`;
-    const messageList = messages.map((m: Message) => {
+    let messageList = await Promise.all(
+        messages.map(async (m: Message) => {
             let role = 'user';
             if (m.author.id === process.env.CLIENT) {
                 role = 'assistant';
             } else if (m.author.bot) {
                 return null;
-            } else if (KNOWN_USERNAMES.includes(m.author.username)) {
-                role = m.author.username;
+            /* } else if (KNOWN_USERNAMES.includes(m.author.username)) {
+                role = m.author.username; */
             }
-            return { role, content: m.cleanContent };
-        });
+            // fetch replied-to message, if there is one, and prompt it as such
+            let cleanContent = m.cleanContent;
+            if (m.type == MessageType.Reply && m.reference) {
+                // what about deeply nested replies? could possibly be recursive?
+                const repliedToMsg = await m.fetchReference();
+                if (repliedToMsg) {
+                    const repliedToMsgLines = repliedToMsg.cleanContent.split('\n');
+                    cleanContent = `> ${repliedToMsgLines.join('\n> ')}\n${cleanContent}`;
+                }
+            }
+
+            return { role, content: cleanContent };
+        })
+    );
+    messageList = messageList.filter(x => !!x);
+
+    // at the beginning, inject the system prompt
+    // at the end, start our text generation as a reply to the most recent msg from history
+    const replyContext = `> ${messageList[messageList.length - 1].content.split('\n').join('\n> ')}\n`;
     const reqBody = [
         {
             "role": "system",
             "content": config["llmconf"].sys_prompt
         },
-        ...messageList.filter(x => x)
+        ...messageList,
+        {
+            "role": "assistant",
+            "content": replyContext
+        }
     ];
     logInfo("[bot] Requesting LLM response with message list: " + reqBody.map(m => m.content));
     const res = await fetch(llmEndpoint, {
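
The reply context is spliced in as a Markdown-style quote: every line of the replied-to message gets a "> " prefix, then the new message body follows on its own line. A minimal sketch of that transform in isolation; the helper name quoteReply is hypothetical, not something this commit adds:

// Hypothetical helper mirroring the quoting used in the diff above:
// prefix each line of the replied-to text with "> ", then append the
// reply beneath it.
function quoteReply(repliedTo: string, reply: string): string {
    return `> ${repliedTo.split('\n').join('\n> ')}\n${reply}`;
}

// quoteReply('two\nlines', 'my reply')
// => '> two\n> lines\nmy reply'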
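
The in-diff comment asks about deeply nested replies. One way the quoting could be made recursive, sketched with the same discord.js APIs the diff already uses (fetchReference(), MessageType.Reply); the function name, depth cap, and error handling are assumptions, not part of this commit:

import { Message, MessageType } from 'discord.js';

// Sketch: walk the reply chain up to maxDepth ancestors, quoting each
// level one layer deeper. fetchReference() rejects when the referenced
// message has been deleted, hence the try/catch fallback.
async function buildReplyContext(m: Message, maxDepth = 3): Promise<string> {
    if (maxDepth === 0 || m.type !== MessageType.Reply || !m.reference) {
        return m.cleanContent;
    }
    try {
        const parent = await m.fetchReference();
        const parentText = await buildReplyContext(parent, maxDepth - 1);
        return `> ${parentText.split('\n').join('\n> ')}\n${m.cleanContent}`;
    } catch {
        return m.cleanContent; // referenced message no longer exists
    }
}

Because the parent's text is quoted after it has itself been quoted, a grandparent naturally ends up behind "> > ", matching Markdown's nested-quote convention.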
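
Taken together, the request body now opens with the system prompt and closes with a pre-seeded assistant turn, so generation continues as a quoted reply to the newest message in the history. An illustrative shape, with made-up message contents:

// Illustrative reqBody after this commit (contents invented):
const exampleReqBody = [
    { role: 'system',    content: '<sys_prompt from llmconf>' },
    { role: 'user',      content: 'original message' },
    { role: 'user',      content: '> original message\nsome reply' },
    // replyContext: the newest message above, re-quoted one level deeper
    { role: 'assistant', content: '> > original message\n> some reply\n' },
];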