mirror of
https://git.femboyfinancial.jp/james/FemScoreboard.git
synced 2026-02-19 02:13:08 -08:00
91 lines
3.3 KiB
TypeScript
import { Message } from 'discord.js';
|
|
import { LLMProvider } from './provider';
|
|
import { LMStudioClient } from '@lmstudio/sdk';
|
|
import 'dotenv/config';
|
|
import { serializeMessageHistory } from '../util';
|
|
import { logError, logInfo } from '../../logging';
|
|
import { LLMConfig } from '../commands/types';
|
|
|
|
// Prompt prepended to the serialized message history. Instructs the model to
// continue the conversation in character as Hatsune Miku and to return ONLY
// the raw string that replaces the "..." placeholder in the final templated
// message (no surrounding quotation marks).
const USER_PROMPT = `Continue the following Discord conversation by completing the next message, playing the role of Hatsune Miku. The conversation must progress forward, and you must avoid repeating yourself.

Each message is represented as a line of JSON. Refer to other users by their "name" instead of their "author" field whenever possible.

The conversation is as follows. The last line is the message you have to complete. Please ONLY return the string contents of the "content" field, that go in place of the ellipses. Do not include the enclosing quotation marks in your response.

`;
|
|
|
|
export class LMStudioProvider implements LLMProvider {
|
|
private client: LMStudioClient;
|
|
|
|
constructor() {
|
|
this.client = new LMStudioClient({
|
|
baseUrl: process.env.LMSTUDIO_HOST
|
|
});
|
|
}
|
|
|
|
name() {
|
|
return 'LM Studio';
|
|
}
|
|
|
|
setModel(id: string) {
|
|
// LM Studio uses the model currently loaded in the GUI
|
|
// This is provided for interface compatibility
|
|
logInfo(`[lmstudio] setModel called with: ${id} (LM Studio uses the model loaded in its GUI)`);
|
|
}
|
|
|
|
async requestLLMResponse(history: Message[], sysprompt: string, params: LLMConfig): Promise<string> {
|
|
let messageList = await Promise.all(
|
|
history.map(serializeMessageHistory)
|
|
);
|
|
messageList = messageList.filter(x => !!x);
|
|
|
|
if (messageList.length === 0) {
|
|
throw new TypeError("No messages with content provided in history!");
|
|
}
|
|
|
|
// dummy message for last line of prompt
|
|
const lastMsg = messageList[messageList.length - 1];
|
|
|
|
// advance by 5 seconds
|
|
let newDate = new Date(lastMsg!.timestamp);
|
|
newDate.setSeconds(newDate.getSeconds() + 5);
|
|
|
|
let templateMsgTxt = JSON.stringify({
|
|
timestamp: newDate.toUTCString(),
|
|
author: "Hatsune Miku",
|
|
name: "Hatsune Miku",
|
|
context: lastMsg!.content,
|
|
content: "..."
|
|
});
|
|
|
|
const messageHistoryTxt = messageList.map(msg => JSON.stringify(msg)).join('\n') + '\n' + templateMsgTxt;
|
|
logInfo(`[lmstudio] Requesting response for message history: ${messageHistoryTxt}`);
|
|
|
|
try {
|
|
// Get the currently loaded model from LM Studio
|
|
const model = await this.client.llm.model();
|
|
|
|
const response = await model.respond([
|
|
{ role: "system", content: sysprompt },
|
|
{ role: "user", content: USER_PROMPT + messageHistoryTxt }
|
|
], {
|
|
temperature: params?.temperature || 0.5,
|
|
topPSampling: params?.top_p || 0.9,
|
|
maxTokens: params?.max_new_tokens || 128,
|
|
});
|
|
|
|
const content = response.content;
|
|
logInfo(`[lmstudio] API response: ${content}`);
|
|
|
|
if (!content) {
|
|
throw new TypeError("LM Studio API returned no message.");
|
|
}
|
|
|
|
return content;
|
|
} catch (err) {
|
|
logError(`[lmstudio] API Error: ` + err);
|
|
throw err;
|
|
}
|
|
}
|
|
}
|