Mirror of https://git.femboyfinancial.jp/james/FemScoreboard.git (synced 2025-11-04 05:13:43 -08:00)
	Include message reply context in prompt
@@ -214,23 +214,45 @@ async function requestLLMResponse(messages)
         queryParams.append(field, config["llmconf"].llmSettings[field]);
     }
     const llmEndpoint = `${process.env.LLM_HOST}/?${queryParams.toString()}`;
-    const messageList = messages.map((m: Message) => {
+    let messageList = await Promise.all(
+        messages.map(async (m: Message) => {
 	    let role = 'user';
 	    if (m.author.id === process.env.CLIENT) {
 		    role = 'assistant';
 	    } else if (m.author.bot) {
 		    return null;
-	    } else if (KNOWN_USERNAMES.includes(m.author.username)) {
-		    role = m.author.username;
+	    /* } else if (KNOWN_USERNAMES.includes(m.author.username)) {
+	           role = m.author.username; */
 	    }
-	    return { role, content: m.cleanContent };
-    });
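For orientation, the branch above maps each Discord message in the history to a chat role: messages authored by the bot itself (author ID equal to process.env.CLIENT) become 'assistant', messages from any other bot are returned as null and filtered out further down, and everything else defaults to 'user' (the per-username roles are left commented out). A minimal sketch of that rule, with a hypothetical Author shape standing in for discord.js's m.author:

    // Sketch only: restates the role-assignment branch from the diff.
    type Author = { id: string; bot: boolean; username: string };

    function roleFor(author: Author, clientId: string): string | null {
        if (author.id === clientId) return 'assistant'; // the bot's own past replies
        if (author.bot) return null;                    // other bots: dropped from the prompt
        return 'user';                                  // human messages
    }

    // roleFor({ id: '1', bot: true,  username: 'FemScoreboard' }, '1') === 'assistant'
    // roleFor({ id: '2', bot: true,  username: 'OtherBot' },      '1') === null
    // roleFor({ id: '3', bot: false, username: 'james' },         '1') === 'user'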
+	    // fetch replied-to message, if there is one, and prompt it as such
+	    let cleanContent = m.cleanContent;
+	    if (m.type == MessageType.Reply && m.reference) {
+		    // what about deeply nested replies? could possibly be recursive?
+		    const repliedToMsg = await m.fetchReference();
+		    if (repliedToMsg) {
+			    const repliedToMsgLines = repliedToMsg.cleanContent.split('\n');
+			    cleanContent = `> ${repliedToMsgLines.join('\n> ')}\n${cleanContent}`;
+		    }
+	    }
+
+	    return { role, content: cleanContent };
+        })
+    );
+    messageList = messageList.filter(x => !!x);
 
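The reply handling added above quotes the referenced message in Markdown blockquote style: every line of the replied-to message is prefixed with "> " and the block is prepended to the reply's own text, so the model sees the same quoting convention Discord users do. A self-contained sketch of the transform (quoteReply is a hypothetical helper, not part of the commit):

    // Sketch only: the same string transform as in the diff above.
    function quoteReply(repliedTo: string, reply: string): string {
        const lines = repliedTo.split('\n');
        return `> ${lines.join('\n> ')}\n${reply}`;
    }

    // quoteReply('first line\nsecond line', 'my answer')
    //   => '> first line\n> second line\nmy answer'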
     // at the beginning, inject the system prompt
     // at the end, start our text generation as a reply to the most recent msg from history
     const replyContext = `> ${messageList[messageList.length - 1].content.split('\n').join('\n> ')}\n`;
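replyContext applies the same blockquote form to the newest message in the history; it is used below to seed the assistant turn, so generation starts as though the bot were already mid-reply to that message. A sketch of what the seed looks like for a one-line message (illustrative values, not output from the code):

    // Sketch only: the assistant seed string for a hypothetical last message.
    const last = { role: 'user', content: 'who won?' };
    const seed = `> ${last.content.split('\n').join('\n> ')}\n`;
    // seed === '> who won?\n'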
     const reqBody = [
         {
             "role": "system",
             "content": config["llmconf"].sys_prompt
         },
-        ...messageList.filter(x => x)
+        ...messageList,
         {
             "role": "assistant",
             "content": replyContext
         }
     ];
     logInfo("[bot] Requesting LLM response with message list: " + reqBody.map(m => m.content));
     const res = await fetch(llmEndpoint, {
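Put together, the body sent to the LLM endpoint is a system turn, the mapped history, and the pre-seeded assistant turn. A hypothetical rendering for a one-message history (the system prompt text is invented for illustration; the real value comes from config["llmconf"].sys_prompt):

    // Sketch only: the shape of reqBody for a short history.
    const exampleReqBody = [
        { role: 'system',    content: 'You are a helpful Discord bot.' }, // sys_prompt
        { role: 'user',      content: 'who won last night?' },            // mapped history
        { role: 'assistant', content: '> who won last night?\n' },        // replyContext seed
    ];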