Files
FemScoreboard/discord/provider/openai.ts
2026-02-27 02:48:25 -08:00

105 lines
3.6 KiB
TypeScript

import { Message } from 'discord.js';
import { LLMProvider } from './provider';
import { OpenAI } from 'openai';
import 'dotenv/config';
import { serializeMessageHistory } from '../util';
import { logError, logInfo } from '../../logging';
import { LLMConfig } from '../commands/types';
// Instruction prefix sent as the user message in every completion request.
// The serialized conversation (JSON-lines) is appended directly after it,
// ending with a template message whose "content" is "..." for the model to
// fill in. NOTE: this is a runtime string the model sees — do not reword
// casually; the "..." / "content"-field wording is load-bearing.
const USER_PROMPT = `Continue the following Discord conversation by completing the next message, playing the role of Hatsune Miku. The conversation must progress forward, and you must avoid repeating yourself.
Each message is represented as a line of JSON. Refer to other users by their "name" instead of their "author" field whenever possible.
The conversation is as follows. The last line is the message you have to complete. Please ONLY return the string contents of the "content" field, that go in place of the ellipses. Do not include the enclosing quotation marks in your response.
`;
/**
 * LLM provider backed by the OpenAI chat-completions API, or any
 * OpenAI-compatible endpoint selected via the OPENAI_HOST base URL.
 */
export class OpenAIProvider implements LLMProvider {
  private client: OpenAI;
  private model: string;

  /**
   * @param token API key; defaults to the LLM_TOKEN environment variable.
   * @param model Model identifier sent with every completion request.
   * @throws TypeError when no token is available from either source.
   */
  constructor(
    token: string | undefined = process.env.LLM_TOKEN,
    model = 'zai-org/glm-4.7-flash'
  ) {
    if (!token) {
      throw new TypeError(
        'LLM token was not passed in, and environment variable LLM_TOKEN was unset!'
      );
    }
    this.client = new OpenAI({
      baseURL: process.env.OPENAI_HOST,
      apiKey: token,
    });
    this.model = model;
  }

  /** Human-readable provider name including the active model. */
  name() {
    return `OpenAI (${this.model})`;
  }

  /** Switches the model used for subsequent requests. */
  setModel(model: string) {
    this.model = model;
  }

  /**
   * Serializes the Discord message history to JSON-lines, appends a dummy
   * final message for the bot to complete, and asks the model for the
   * completion text.
   *
   * @param history   Discord messages; the last entry is treated as the most
   *                  recent message.
   * @param sysprompt System prompt sent ahead of the conversation.
   * @param params    Sampling parameters; defaults are applied per-field.
   * @returns The model's reply text, with any `</think>` reasoning prefix
   *          stripped.
   * @throws TypeError when the history serializes to nothing or the API
   *         returns an empty message.
   */
  async requestLLMResponse(
    history: Message[],
    sysprompt: string,
    params: LLMConfig
  ): Promise<string> {
    const serialized = await Promise.all(history.map(serializeMessageHistory));
    const messageList = serialized.filter((x) => !!x);
    if (messageList.length === 0) {
      throw new TypeError('No messages with content provided in history!');
    }

    // Dummy message for the last line of the prompt: same shape as the real
    // entries, timestamped 5 seconds after the last message, with "..."
    // content for the model to fill in.
    const lastMsg = messageList[messageList.length - 1]!;
    const newDate = new Date(lastMsg.timestamp);
    newDate.setSeconds(newDate.getSeconds() + 5);
    const templateMsgTxt = JSON.stringify({
      timestamp: newDate.toUTCString(),
      author: 'Hatsune Miku',
      name: 'Hatsune Miku',
      context: lastMsg.content,
      content: '...',
    });

    const messageHistoryTxt =
      messageList.map((msg) => JSON.stringify(msg)).join('\n') + '\n' + templateMsgTxt;
    logInfo(`[openai] Requesting response for message history: ${messageHistoryTxt}`);

    try {
      const response = await this.client.chat.completions.create({
        model: this.model,
        messages: [
          { role: 'system', content: sysprompt },
          { role: 'user', content: USER_PROMPT + messageHistoryTxt },
        ],
        // ?? (not ||) so explicit zero values are honored — e.g.
        // temperature: 0 or top_p: 0 must not fall back to the default.
        temperature: params?.temperature ?? 0.5,
        top_p: params?.top_p ?? 0.9,
        max_tokens: params?.max_new_tokens ?? 128,
      });

      // The SDK types message.content as string | null, and choices may be
      // empty — guard BEFORE any string operations (the previous order
      // crashed on .lastIndexOf when content was null).
      let content = response.choices[0]?.message?.content;
      if (!content) {
        throw new TypeError('OpenAI API returned no message.');
      }

      // Strip chain-of-thought output emitted by reasoning models.
      const thinkEnd = content.lastIndexOf('</think>');
      if (thinkEnd > -1) {
        content = content.slice(thinkEnd + '</think>'.length);
      }
      logInfo(`[openai] API response: ${content}`);

      // Re-check: a response that was ALL reasoning strips down to nothing.
      if (!content) {
        throw new TypeError('OpenAI API returned no message.');
      }
      return content;
    } catch (err) {
      logError(`[openai] API Error: ` + err);
      throw err;
    }
  }
}