Loading msgs, unit tests, change DB column types (breaking change)
This commit is contained in:
@@ -4,12 +4,17 @@ CLIENT="123456789012345678"
|
||||
GUILD="123456789012345678"
|
||||
ADMIN="123456789012345678"
|
||||
|
||||
# Custom emojis for loading states (format: <a:name:id> or <:name:id>)
|
||||
LOADING_EMOJIS="<:clueless:1476853248135790643>,<a:hachune:1476838658169503878>,<a:chairspin:1476838586929119234>,<a:nekodance:1476838199019049056>"
|
||||
|
||||
HF_TOKEN=""
|
||||
LLM_HOST="http://127.0.0.1:8000"
|
||||
LLM_TOKEN="dfsl;kjsdl;kfja"
|
||||
OPENAI_HOST="http://localhost:1234/v1"
|
||||
REPLY_CHANCE=0.2
|
||||
|
||||
RVC_HOST="http://127.0.0.1:8001"
|
||||
|
||||
ENABLE_MOTD=1
|
||||
MOTD_CHANNEL="123456789012345678"
|
||||
MOTD_HREF="https://fembooru.jp/post/list"
|
||||
|
||||
234
discord/__tests__/bot.test.ts
Normal file
234
discord/__tests__/bot.test.ts
Normal file
@@ -0,0 +1,234 @@
|
||||
/**
|
||||
* Tests for bot.ts helper functions
|
||||
*/
|
||||
|
||||
// Mock dependencies before importing bot
|
||||
jest.mock('../util', () => {
|
||||
const actual = jest.requireActual('../util');
|
||||
return {
|
||||
...actual,
|
||||
openDb: jest.fn(),
|
||||
db: {
|
||||
migrate: jest.fn(),
|
||||
get: jest.fn(),
|
||||
run: jest.fn(),
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
jest.mock('node-fetch', () => jest.fn());
|
||||
jest.mock('tmp', () => ({
|
||||
fileSync: jest.fn(() => ({ name: '/tmp/test' })),
|
||||
setGracefulCleanup: jest.fn(),
|
||||
}));
|
||||
|
||||
jest.mock('fs', () => ({
|
||||
...jest.requireActual('fs'),
|
||||
writeFileSync: jest.fn(),
|
||||
readFileSync: jest.fn(),
|
||||
existsSync: jest.fn(),
|
||||
}));
|
||||
|
||||
// Mock environment variables
|
||||
const mockEnv = {
|
||||
LOADING_EMOJIS: '<:clueless:123>,<a:hachune:456>,<a:chairspin:789>,<a:nekodance:012>',
|
||||
};
|
||||
|
||||
// Helper functions for testing
|
||||
function parseLoadingEmojis(): string[] {
|
||||
const emojiStr = mockEnv.LOADING_EMOJIS || '';
|
||||
if (!emojiStr.trim()) {
|
||||
return ['🤔', '✨', '🎵'];
|
||||
}
|
||||
return emojiStr
|
||||
.split(',')
|
||||
.map((e) => e.trim())
|
||||
.filter((e) => e.length > 0);
|
||||
}
|
||||
|
||||
function getRandomLoadingEmoji(): string {
|
||||
const emojis = parseLoadingEmojis();
|
||||
return emojis[Math.floor(Math.random() * emojis.length)];
|
||||
}
|
||||
|
||||
function formatLoadingMessage(emoji: string, reasoning: string): string {
|
||||
const kawaiiPhrases = [
|
||||
'Hmm... let me think~ ♪',
|
||||
'Processing nyaa~',
|
||||
'Miku is thinking...',
|
||||
'Calculating with magic ✨',
|
||||
'Pondering desu~',
|
||||
'Umm... one moment! ♪',
|
||||
'Brain go brrr~',
|
||||
'Assembling thoughts... ♪',
|
||||
'Loading Miku-brain...',
|
||||
'Thinking hard senpai~',
|
||||
];
|
||||
const phrase = kawaiiPhrases[Math.floor(Math.random() * kawaiiPhrases.length)];
|
||||
|
||||
let content = `${emoji} ${phrase}`;
|
||||
if (reasoning && reasoning.trim().length > 0) {
|
||||
const displayReasoning =
|
||||
reasoning.length > 500 ? reasoning.slice(0, 500) + '...' : reasoning;
|
||||
content += `\n\n> ${displayReasoning}`;
|
||||
}
|
||||
return content;
|
||||
}
|
||||
|
||||
describe('bot.ts helper functions', () => {
|
||||
/**
|
||||
* Convert a Date to a Discord snowflake ID (approximate)
|
||||
* Discord epoch: 2015-01-01T00:00:00.000Z
|
||||
*/
|
||||
function dateToSnowflake(date: Date): string {
|
||||
const DISCORD_EPOCH = 1420070400000n;
|
||||
const timestamp = BigInt(date.getTime());
|
||||
const snowflake = (timestamp - DISCORD_EPOCH) << 22n;
|
||||
return snowflake.toString();
|
||||
}
|
||||
|
||||
describe('dateToSnowflake', () => {
|
||||
it('should convert Discord epoch to snowflake 0', () => {
|
||||
const discordEpoch = new Date('2015-01-01T00:00:00.000Z');
|
||||
const result = dateToSnowflake(discordEpoch);
|
||||
expect(result).toBe('0');
|
||||
});
|
||||
|
||||
it('should convert a known date to snowflake', () => {
|
||||
// Test with a known date
|
||||
const testDate = new Date('2024-01-01T00:00:00.000Z');
|
||||
const result = dateToSnowflake(testDate);
|
||||
expect(result).toMatch(/^\d+$/); // Should be a numeric string
|
||||
expect(result.length).toBeGreaterThan(10); // Snowflakes are large numbers
|
||||
});
|
||||
|
||||
it('should produce increasing snowflakes for increasing dates', () => {
|
||||
const date1 = new Date('2024-01-01T00:00:00.000Z');
|
||||
const date2 = new Date('2024-01-02T00:00:00.000Z');
|
||||
const snowflake1 = dateToSnowflake(date1);
|
||||
const snowflake2 = dateToSnowflake(date2);
|
||||
expect(BigInt(snowflake2)).toBeGreaterThan(BigInt(snowflake1));
|
||||
});
|
||||
});
|
||||
|
||||
describe('textOnlyMessages', () => {
|
||||
function textOnlyMessages(message: { cleanContent: string; type: number }): boolean {
|
||||
const { MessageType } = require('discord.js');
|
||||
return (
|
||||
message.cleanContent.length > 0 &&
|
||||
(message.type === MessageType.Default || message.type === MessageType.Reply)
|
||||
);
|
||||
}
|
||||
|
||||
it('should return true for messages with content and default type', () => {
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
type: 0, // Default
|
||||
};
|
||||
|
||||
expect(textOnlyMessages(mockMessage)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return true for messages with content and reply type', () => {
|
||||
const mockMessage = {
|
||||
cleanContent: 'Reply!',
|
||||
type: 19, // Reply
|
||||
};
|
||||
|
||||
expect(textOnlyMessages(mockMessage)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for empty messages', () => {
|
||||
const mockMessage = {
|
||||
cleanContent: '',
|
||||
type: 0,
|
||||
};
|
||||
|
||||
expect(textOnlyMessages(mockMessage)).toBe(false);
|
||||
});
|
||||
|
||||
it('should return false for system messages', () => {
|
||||
const mockMessage = {
|
||||
cleanContent: 'System message',
|
||||
type: 1, // RecipientAdd
|
||||
};
|
||||
|
||||
expect(textOnlyMessages(mockMessage)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('isGoodResponse', () => {
|
||||
function isGoodResponse(response: string): boolean {
|
||||
return response.length > 0;
|
||||
}
|
||||
|
||||
it('should return true for non-empty responses', () => {
|
||||
expect(isGoodResponse('Hello!')).toBe(true);
|
||||
expect(isGoodResponse('a')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for empty responses', () => {
|
||||
expect(isGoodResponse('')).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseLoadingEmojis', () => {
|
||||
it('should parse emojis from environment variable', () => {
|
||||
const result = parseLoadingEmojis();
|
||||
expect(result).toHaveLength(4);
|
||||
expect(result).toEqual([
|
||||
'<:clueless:123>',
|
||||
'<a:hachune:456>',
|
||||
'<a:chairspin:789>',
|
||||
'<a:nekodance:012>',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should return default emojis when LOADING_EMOJIS is empty', () => {
|
||||
const original = mockEnv.LOADING_EMOJIS;
|
||||
mockEnv.LOADING_EMOJIS = '';
|
||||
const result = parseLoadingEmojis();
|
||||
mockEnv.LOADING_EMOJIS = original;
|
||||
expect(result).toEqual(['🤔', '✨', '🎵']);
|
||||
});
|
||||
|
||||
it('should handle whitespace in emoji list', () => {
|
||||
const original = mockEnv.LOADING_EMOJIS;
|
||||
mockEnv.LOADING_EMOJIS = ' <:test:123> , <a:spin:456> ';
|
||||
const result = parseLoadingEmojis();
|
||||
mockEnv.LOADING_EMOJIS = original;
|
||||
expect(result).toEqual(['<:test:123>', '<a:spin:456>']);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getRandomLoadingEmoji', () => {
|
||||
it('should return a valid emoji from the list', () => {
|
||||
const result = getRandomLoadingEmoji();
|
||||
const validEmojis = parseLoadingEmojis();
|
||||
expect(validEmojis).toContain(result);
|
||||
});
|
||||
});
|
||||
|
||||
describe('formatLoadingMessage', () => {
|
||||
it('should format message with emoji and phrase only when no reasoning', () => {
|
||||
const result = formatLoadingMessage('<:clueless:123>', '');
|
||||
expect(result).toContain('<:clueless:123>');
|
||||
// Check that there's no blockquote (newline followed by "> ")
|
||||
expect(result).not.toMatch(/\n\n> /);
|
||||
});
|
||||
|
||||
it('should include reasoning in blockquote when present', () => {
|
||||
const reasoning = 'This is my thought process...';
|
||||
const result = formatLoadingMessage('<a:hachune:456>', reasoning);
|
||||
expect(result).toContain('<a:hachune:456>');
|
||||
expect(result).toContain(`> ${reasoning}`);
|
||||
});
|
||||
|
||||
it('should truncate long reasoning text', () => {
|
||||
const longReasoning = 'a'.repeat(600);
|
||||
const result = formatLoadingMessage('<:clueless:123>', longReasoning);
|
||||
expect(result).toContain('...');
|
||||
expect(result.length).toBeLessThan(longReasoning.length + 50);
|
||||
});
|
||||
});
|
||||
});
|
||||
101
discord/__tests__/config.test.ts
Normal file
101
discord/__tests__/config.test.ts
Normal file
@@ -0,0 +1,101 @@
|
||||
/**
|
||||
* Tests for commands/config/config.ts (llmconf command)
|
||||
*/
|
||||
|
||||
jest.mock('discord.js', () => {
|
||||
const actual = jest.requireActual('discord.js');
|
||||
return {
|
||||
...actual,
|
||||
SlashCommandBuilder: jest.fn().mockImplementation(() => ({
|
||||
setName: jest.fn().mockReturnThis(),
|
||||
setDescription: jest.fn().mockReturnThis(),
|
||||
addNumberOption: jest.fn().mockReturnThis(),
|
||||
addIntegerOption: jest.fn().mockReturnThis(),
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
const configCommand = require('../commands/config/config');
|
||||
|
||||
describe('config command (llmconf)', () => {
|
||||
let mockInteraction: {
|
||||
user: { id: string };
|
||||
options: {
|
||||
getInteger: jest.Mock;
|
||||
getNumber: jest.Mock;
|
||||
};
|
||||
reply: jest.Mock;
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env.ADMIN = '123456789012345678';
|
||||
mockInteraction = {
|
||||
user: { id: '123456789012345678' },
|
||||
options: {
|
||||
getInteger: jest.fn(),
|
||||
getNumber: jest.fn(),
|
||||
},
|
||||
reply: jest.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
it('should have correct command data structure', () => {
|
||||
expect(configCommand.data).toBeDefined();
|
||||
expect(configCommand.data.setName).toBeDefined();
|
||||
expect(configCommand.execute).toBeDefined();
|
||||
expect(configCommand.state).toBeDefined();
|
||||
});
|
||||
|
||||
it('should reject non-admin users', async () => {
|
||||
mockInteraction.user = { id: 'unauthorized-user' };
|
||||
|
||||
await configCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalledWith(
|
||||
'You are not authorized to change model settings'
|
||||
);
|
||||
});
|
||||
|
||||
it('should accept admin users and return config', async () => {
|
||||
await configCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalled();
|
||||
const replyContent = mockInteraction.reply.mock.calls[0][0];
|
||||
expect(replyContent).toContain('max_new_tokens');
|
||||
expect(replyContent).toContain('temperature');
|
||||
});
|
||||
|
||||
it('should use default values when options not provided', async () => {
|
||||
mockInteraction.options.getInteger.mockReturnValue(null);
|
||||
mockInteraction.options.getNumber.mockReturnValue(null);
|
||||
|
||||
await configCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should accept custom temperature value', async () => {
|
||||
mockInteraction.options.getNumber.mockImplementation((name: string) => {
|
||||
if (name === 'temperature') return 0.9;
|
||||
return null;
|
||||
});
|
||||
|
||||
await configCommand.execute(mockInteraction);
|
||||
|
||||
const state = configCommand.state();
|
||||
expect(state.temperature).toBe(0.9);
|
||||
});
|
||||
|
||||
it('should accept custom msg_context value', async () => {
|
||||
mockInteraction.options.getInteger.mockImplementation((name: string) => {
|
||||
if (name === 'msg_context') return 16;
|
||||
return null;
|
||||
});
|
||||
|
||||
await configCommand.execute(mockInteraction);
|
||||
|
||||
const state = configCommand.state();
|
||||
expect(state.msg_context).toBe(16);
|
||||
});
|
||||
});
|
||||
115
discord/__tests__/edit_sysprompt.test.ts
Normal file
115
discord/__tests__/edit_sysprompt.test.ts
Normal file
@@ -0,0 +1,115 @@
|
||||
/**
|
||||
* Tests for commands/config/edit_sysprompt.ts
|
||||
*/
|
||||
|
||||
jest.mock('node:fs', () => ({
|
||||
writeFileSync: jest.fn(),
|
||||
readFileSync: jest.fn(),
|
||||
existsSync: jest.fn(),
|
||||
}));
|
||||
|
||||
jest.mock('node:path', () => ({
|
||||
...jest.requireActual('node:path'),
|
||||
resolve: jest.fn((_, ...args) => `/mock/path/${args.join('/')}`),
|
||||
}));
|
||||
|
||||
jest.mock('discord.js', () => {
|
||||
const actual = jest.requireActual('discord.js');
|
||||
return {
|
||||
...actual,
|
||||
SlashCommandBuilder: jest.fn().mockImplementation(() => ({
|
||||
setName: jest.fn().mockReturnThis(),
|
||||
setDescription: jest.fn().mockReturnThis(),
|
||||
addStringOption: jest.fn().mockReturnThis(),
|
||||
addAttachmentOption: jest.fn().mockReturnThis(),
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
const editSyspromptCommand = require('../commands/config/edit_sysprompt');
|
||||
|
||||
describe('edit_sysprompt command', () => {
|
||||
let mockInteraction: {
|
||||
user: { id: string };
|
||||
options: {
|
||||
getString: jest.Mock;
|
||||
getAttachment: jest.Mock;
|
||||
};
|
||||
reply: jest.Mock;
|
||||
};
|
||||
|
||||
const mockAttachment = {
|
||||
url: 'http://example.com/file.txt',
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env.ADMIN = '123456789012345678';
|
||||
mockInteraction = {
|
||||
user: { id: '123456789012345678' },
|
||||
options: {
|
||||
getString: jest.fn(),
|
||||
getAttachment: jest.fn(),
|
||||
},
|
||||
reply: jest.fn(),
|
||||
};
|
||||
|
||||
global.fetch = jest.fn().mockResolvedValue({
|
||||
text: jest.fn().mockResolvedValue('New system prompt content'),
|
||||
}) as jest.Mock;
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
delete (global as unknown as Record<string, unknown>).fetch;
|
||||
});
|
||||
|
||||
it('should have correct command data structure', () => {
|
||||
expect(editSyspromptCommand.data).toBeDefined();
|
||||
expect(editSyspromptCommand.execute).toBeDefined();
|
||||
});
|
||||
|
||||
it('should reject non-admin users', async () => {
|
||||
mockInteraction.user = { id: 'unauthorized-user' };
|
||||
|
||||
await editSyspromptCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalledWith(
|
||||
'You are not authorized to change model settings'
|
||||
);
|
||||
});
|
||||
|
||||
it('should reject invalid prompt names (non-alphanumeric)', async () => {
|
||||
mockInteraction.options.getString.mockReturnValue('invalid name!');
|
||||
|
||||
await editSyspromptCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalledWith(expect.stringContaining('alphanumeric'));
|
||||
});
|
||||
|
||||
it('should accept valid prompt name and content for admin users', async () => {
|
||||
mockInteraction.options.getString.mockReturnValue('test_prompt');
|
||||
mockInteraction.options.getAttachment.mockReturnValue(mockAttachment);
|
||||
|
||||
await editSyspromptCommand.execute(mockInteraction);
|
||||
|
||||
expect(global.fetch).toHaveBeenCalledWith(mockAttachment.url);
|
||||
expect(mockInteraction.reply).toHaveBeenCalledWith(
|
||||
expect.stringContaining('System prompt "test_prompt" set to')
|
||||
);
|
||||
});
|
||||
|
||||
it('should truncate long content in response', async () => {
|
||||
const longContent = 'a'.repeat(2000);
|
||||
(global.fetch as jest.Mock).mockResolvedValue({
|
||||
text: jest.fn().mockResolvedValue(longContent),
|
||||
});
|
||||
|
||||
mockInteraction.options.getString.mockReturnValue('test_prompt');
|
||||
mockInteraction.options.getAttachment.mockReturnValue(mockAttachment);
|
||||
|
||||
await editSyspromptCommand.execute(mockInteraction);
|
||||
|
||||
const replyContent = mockInteraction.reply.mock.calls[0][0];
|
||||
expect(replyContent).toContain('...');
|
||||
});
|
||||
});
|
||||
259
discord/__tests__/ollama_provider.test.ts
Normal file
259
discord/__tests__/ollama_provider.test.ts
Normal file
@@ -0,0 +1,259 @@
|
||||
/**
|
||||
* Tests for OllamaProvider
|
||||
*/
|
||||
|
||||
const mockChat = jest.fn();
|
||||
|
||||
jest.mock('ollama', () => {
|
||||
const MockOllama = jest.fn().mockImplementation(() => ({
|
||||
chat: mockChat,
|
||||
}));
|
||||
return { Ollama: MockOllama };
|
||||
});
|
||||
|
||||
jest.mock('../util', () => ({
|
||||
serializeMessageHistory: jest.fn((msg) =>
|
||||
Promise.resolve({
|
||||
timestamp: msg.createdAt.toUTCString(),
|
||||
author: msg.author.username,
|
||||
name: 'Test User',
|
||||
content: msg.cleanContent,
|
||||
})
|
||||
),
|
||||
}));
|
||||
|
||||
jest.mock('../../logging', () => ({
|
||||
logError: jest.fn(),
|
||||
logInfo: jest.fn(),
|
||||
logWarn: jest.fn(),
|
||||
}));
|
||||
|
||||
import { OllamaProvider } from '../provider/ollama';
|
||||
import type { LLMConfig } from '../commands/types';
|
||||
|
||||
describe('OllamaProvider', () => {
|
||||
const mockConfig: LLMConfig = {
|
||||
max_new_tokens: 100,
|
||||
min_new_tokens: 1,
|
||||
temperature: 0.7,
|
||||
top_p: 0.9,
|
||||
frequency_penalty: 0.0,
|
||||
presence_penalty: 0.0,
|
||||
msg_context: 8,
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env.LLM_HOST = 'http://test-ollama-host';
|
||||
mockChat.mockReset();
|
||||
});
|
||||
|
||||
it('should initialize with host and model', () => {
|
||||
const provider = new OllamaProvider('http://test-host', 'llama2');
|
||||
expect(provider).toBeDefined();
|
||||
expect(provider.name()).toContain('llama2');
|
||||
});
|
||||
|
||||
it('should use environment variable when host not explicitly provided', () => {
|
||||
// When undefined is passed, constructor falls back to process.env.LLM_HOST
|
||||
const provider = new OllamaProvider(undefined, 'llama2');
|
||||
expect(provider).toBeDefined();
|
||||
expect(provider.name()).toContain('llama2');
|
||||
});
|
||||
|
||||
it('should return correct name', () => {
|
||||
const provider = new OllamaProvider('http://test-host', 'mistral');
|
||||
expect(provider.name()).toBe('Ollama (mistral)');
|
||||
});
|
||||
|
||||
it('should set model correctly', () => {
|
||||
const provider = new OllamaProvider('http://test-host', 'llama2');
|
||||
provider.setModel('mistral');
|
||||
expect(provider.name()).toBe('Ollama (mistral)');
|
||||
});
|
||||
|
||||
it('should request LLM response successfully', async () => {
|
||||
mockChat.mockResolvedValue({
|
||||
message: {
|
||||
content: 'Hello! This is a test response from Ollama.',
|
||||
},
|
||||
});
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OllamaProvider('http://test-host', 'llama2');
|
||||
const response = await provider.requestLLMResponse(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
mockConfig
|
||||
);
|
||||
|
||||
expect(response).toBe('Hello! This is a test response from Ollama.');
|
||||
expect(mockChat).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should handle empty response from API', async () => {
|
||||
mockChat.mockResolvedValue({
|
||||
message: {
|
||||
content: '',
|
||||
},
|
||||
});
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OllamaProvider('http://test-host', 'llama2');
|
||||
|
||||
await expect(
|
||||
provider.requestLLMResponse([mockMessage], 'You are a helpful assistant', mockConfig)
|
||||
).rejects.toThrow('Ollama chat API returned no message.');
|
||||
});
|
||||
|
||||
it('should handle empty history', async () => {
|
||||
// Mock serializeMessageHistory to return undefined for this test
|
||||
const { serializeMessageHistory } = require('../util');
|
||||
serializeMessageHistory.mockResolvedValue(undefined);
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: '',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OllamaProvider('http://test-host', 'llama2');
|
||||
|
||||
await expect(
|
||||
provider.requestLLMResponse([mockMessage], 'You are a helpful assistant', mockConfig)
|
||||
).rejects.toThrow('No messages with content provided in history!');
|
||||
});
|
||||
|
||||
it('should use default parameters when config not provided', async () => {
|
||||
// Reset the mock to its default implementation
|
||||
const { serializeMessageHistory } = require('../util');
|
||||
serializeMessageHistory.mockImplementation((msg: import('discord.js').Message) =>
|
||||
Promise.resolve({
|
||||
timestamp: msg.createdAt.toUTCString(),
|
||||
author: msg.author.username,
|
||||
name: 'Test User',
|
||||
content: msg.cleanContent,
|
||||
})
|
||||
);
|
||||
|
||||
mockChat.mockResolvedValue({
|
||||
message: {
|
||||
content: 'Response with defaults',
|
||||
},
|
||||
});
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OllamaProvider('http://test-host', 'llama2');
|
||||
await provider.requestLLMResponse(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
{} as LLMConfig
|
||||
);
|
||||
|
||||
expect(mockChat).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
options: expect.objectContaining({
|
||||
temperature: 0.5,
|
||||
top_p: 0.9,
|
||||
num_predict: 128,
|
||||
}),
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('OllamaProvider streaming', () => {
|
||||
const mockConfig: LLMConfig = {
|
||||
max_new_tokens: 100,
|
||||
min_new_tokens: 1,
|
||||
temperature: 0.7,
|
||||
top_p: 0.9,
|
||||
frequency_penalty: 0.0,
|
||||
presence_penalty: 0.0,
|
||||
msg_context: 8,
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env.LLM_HOST = 'http://test-ollama-host';
|
||||
mockChat.mockReset();
|
||||
});
|
||||
|
||||
it('should stream response with content chunks', async () => {
|
||||
const mockStream = {
|
||||
[Symbol.asyncIterator]: async function* () {
|
||||
yield { message: { content: 'Hello' } };
|
||||
yield { message: { content: '!' } };
|
||||
yield { message: { content: ' Test' } };
|
||||
},
|
||||
};
|
||||
|
||||
mockChat.mockResolvedValue(mockStream);
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OllamaProvider('http://test-host', 'llama2');
|
||||
const stream = provider.requestLLMResponseStreaming!(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
mockConfig
|
||||
);
|
||||
|
||||
const chunks: { reasoning?: string; content?: string }[] = [];
|
||||
let finalResult = '';
|
||||
|
||||
for await (const chunk of stream) {
|
||||
chunks.push(chunk);
|
||||
if (chunk.content) {
|
||||
finalResult = chunk.content;
|
||||
}
|
||||
}
|
||||
|
||||
expect(chunks.length).toBeGreaterThan(0);
|
||||
expect(finalResult).toBe('Hello! Test');
|
||||
});
|
||||
|
||||
it('should handle empty history in streaming', async () => {
|
||||
const { serializeMessageHistory } = require('../util');
|
||||
serializeMessageHistory.mockResolvedValue(undefined);
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: '',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OllamaProvider('http://test-host', 'llama2');
|
||||
|
||||
const stream = provider.requestLLMResponseStreaming!(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
mockConfig
|
||||
);
|
||||
|
||||
await expect(async () => {
|
||||
for await (const _ of stream) {
|
||||
// Should throw before yielding
|
||||
}
|
||||
}).rejects.toThrow('No messages with content provided in history!');
|
||||
});
|
||||
});
|
||||
346
discord/__tests__/openai_provider.test.ts
Normal file
346
discord/__tests__/openai_provider.test.ts
Normal file
@@ -0,0 +1,346 @@
|
||||
/**
|
||||
* Tests for OpenAIProvider
|
||||
*/
|
||||
|
||||
const mockCreate = jest.fn();
|
||||
const mockChatCompletions = {
|
||||
create: mockCreate,
|
||||
};
|
||||
const mockChat = {
|
||||
completions: mockChatCompletions,
|
||||
};
|
||||
|
||||
jest.mock('openai', () => {
|
||||
const MockOpenAI = jest.fn().mockImplementation(() => ({
|
||||
chat: mockChat,
|
||||
}));
|
||||
return { OpenAI: MockOpenAI };
|
||||
});
|
||||
|
||||
jest.mock('../util', () => ({
|
||||
serializeMessageHistory: jest.fn((msg) =>
|
||||
Promise.resolve({
|
||||
timestamp: msg.createdAt.toUTCString(),
|
||||
author: msg.author.username,
|
||||
name: 'Test User',
|
||||
content: msg.cleanContent,
|
||||
})
|
||||
),
|
||||
}));
|
||||
|
||||
jest.mock('../../logging', () => ({
|
||||
logError: jest.fn(),
|
||||
logInfo: jest.fn(),
|
||||
logWarn: jest.fn(),
|
||||
}));
|
||||
|
||||
import { OpenAIProvider } from '../provider/openai';
|
||||
import type { LLMConfig } from '../commands/types';
|
||||
|
||||
describe('OpenAIProvider', () => {
|
||||
const mockConfig: LLMConfig = {
|
||||
max_new_tokens: 100,
|
||||
min_new_tokens: 1,
|
||||
temperature: 0.7,
|
||||
top_p: 0.9,
|
||||
frequency_penalty: 0.0,
|
||||
presence_penalty: 0.0,
|
||||
msg_context: 8,
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env.LLM_TOKEN = 'test-token';
|
||||
process.env.OPENAI_HOST = 'http://test-host';
|
||||
mockCreate.mockReset();
|
||||
});
|
||||
|
||||
it('should initialize with token and model', () => {
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
expect(provider).toBeDefined();
|
||||
expect(provider.name()).toContain('gpt-4');
|
||||
});
|
||||
|
||||
it('should use environment variable when token not explicitly provided', () => {
|
||||
// When undefined is passed, constructor falls back to process.env.LLM_TOKEN
|
||||
const provider = new OpenAIProvider(undefined, 'gpt-4');
|
||||
expect(provider).toBeDefined();
|
||||
expect(provider.name()).toContain('gpt-4');
|
||||
});
|
||||
|
||||
it('should return correct name', () => {
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-3.5-turbo');
|
||||
expect(provider.name()).toBe('OpenAI (gpt-3.5-turbo)');
|
||||
});
|
||||
|
||||
it('should set model correctly', () => {
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
provider.setModel('gpt-3.5-turbo');
|
||||
expect(provider.name()).toBe('OpenAI (gpt-3.5-turbo)');
|
||||
});
|
||||
|
||||
it('should request LLM response successfully', async () => {
|
||||
mockCreate.mockResolvedValue({
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'Hello! This is a test response.',
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
const response = await provider.requestLLMResponse(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
mockConfig
|
||||
);
|
||||
|
||||
expect(response).toBe('Hello! This is a test response.');
|
||||
expect(mockCreate).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should handle empty response from API', async () => {
|
||||
mockCreate.mockResolvedValue({
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: '',
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
|
||||
await expect(
|
||||
provider.requestLLMResponse([mockMessage], 'You are a helpful assistant', mockConfig)
|
||||
).rejects.toThrow('OpenAI API returned no message.');
|
||||
});
|
||||
|
||||
it('should handle empty history', async () => {
|
||||
// Mock serializeMessageHistory to return undefined for this test
|
||||
const { serializeMessageHistory } = require('../util');
|
||||
serializeMessageHistory.mockResolvedValue(undefined);
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: '',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
|
||||
await expect(
|
||||
provider.requestLLMResponse([mockMessage], 'You are a helpful assistant', mockConfig)
|
||||
).rejects.toThrow('No messages with content provided in history!');
|
||||
});
|
||||
|
||||
it('should use default parameters when config not provided', async () => {
|
||||
// Reset the mock to its default implementation
|
||||
const { serializeMessageHistory } = require('../util');
|
||||
serializeMessageHistory.mockImplementation((msg: import('discord.js').Message) =>
|
||||
Promise.resolve({
|
||||
timestamp: msg.createdAt.toUTCString(),
|
||||
author: msg.author.username,
|
||||
name: 'Test User',
|
||||
content: msg.cleanContent,
|
||||
})
|
||||
);
|
||||
|
||||
mockCreate.mockResolvedValue({
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'Response with defaults',
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
await provider.requestLLMResponse(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
{} as LLMConfig
|
||||
);
|
||||
|
||||
expect(mockCreate).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
temperature: 0.5,
|
||||
top_p: 0.9,
|
||||
max_tokens: 128,
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should strip </think> tags from response', async () => {
|
||||
mockCreate.mockResolvedValue({
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: '</think>Hello! This is the actual response.',
|
||||
},
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
const response = await provider.requestLLMResponse(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
mockConfig
|
||||
);
|
||||
|
||||
expect(response).toBe('Hello! This is the actual response.');
|
||||
});
|
||||
});
|
||||
|
||||
describe('OpenAIProvider streaming', () => {
|
||||
const mockConfig: LLMConfig = {
|
||||
max_new_tokens: 100,
|
||||
min_new_tokens: 1,
|
||||
temperature: 0.7,
|
||||
top_p: 0.9,
|
||||
frequency_penalty: 0.0,
|
||||
presence_penalty: 0.0,
|
||||
msg_context: 8,
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env.LLM_TOKEN = 'test-token';
|
||||
process.env.OPENAI_HOST = 'http://test-host';
|
||||
mockCreate.mockReset();
|
||||
});
|
||||
|
||||
it('should stream response with content chunks', async () => {
|
||||
const mockStream = {
|
||||
[Symbol.asyncIterator]: async function* () {
|
||||
yield { choices: [{ delta: { content: 'Hello' } }] };
|
||||
yield { choices: [{ delta: { content: '!' } }] };
|
||||
yield { choices: [{ delta: { content: ' Test' } }] };
|
||||
},
|
||||
};
|
||||
|
||||
mockCreate.mockResolvedValue(mockStream);
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
const stream = provider.requestLLMResponseStreaming!(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
mockConfig
|
||||
);
|
||||
|
||||
const chunks: { reasoning?: string; content?: string }[] = [];
|
||||
let finalResult = '';
|
||||
|
||||
for await (const chunk of stream) {
|
||||
chunks.push(chunk);
|
||||
if (chunk.content) {
|
||||
finalResult = chunk.content;
|
||||
}
|
||||
}
|
||||
|
||||
expect(chunks.length).toBeGreaterThan(0);
|
||||
expect(finalResult).toBe('Hello! Test');
|
||||
});
|
||||
|
||||
it('should stream response with reasoning chunks', async () => {
|
||||
const mockStream = {
|
||||
[Symbol.asyncIterator]: async function* () {
|
||||
yield { choices: [{ delta: { reasoning_content: 'Let me think...' } }] };
|
||||
yield { choices: [{ delta: { reasoning_content: ' about this' } }] };
|
||||
yield { choices: [{ delta: { content: 'Hello!' } }] };
|
||||
},
|
||||
};
|
||||
|
||||
mockCreate.mockResolvedValue(mockStream);
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello!',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
const stream = provider.requestLLMResponseStreaming!(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
mockConfig
|
||||
);
|
||||
|
||||
const chunks: { reasoning?: string; content?: string }[] = [];
|
||||
let finalReasoning = '';
|
||||
let finalContent = '';
|
||||
|
||||
for await (const chunk of stream) {
|
||||
chunks.push(chunk);
|
||||
if (chunk.reasoning) {
|
||||
finalReasoning = chunk.reasoning;
|
||||
}
|
||||
if (chunk.content) {
|
||||
finalContent = chunk.content;
|
||||
}
|
||||
}
|
||||
|
||||
expect(finalReasoning).toBe('Let me think... about this');
|
||||
expect(finalContent).toBe('Hello!');
|
||||
});
|
||||
|
||||
it('should handle empty history in streaming', async () => {
|
||||
const { serializeMessageHistory } = require('../util');
|
||||
serializeMessageHistory.mockResolvedValue(undefined);
|
||||
|
||||
const mockMessage = {
|
||||
cleanContent: '',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
} as unknown as import('discord.js').Message;
|
||||
|
||||
const provider = new OpenAIProvider('test-token', 'gpt-4');
|
||||
|
||||
const stream = provider.requestLLMResponseStreaming!(
|
||||
[mockMessage],
|
||||
'You are a helpful assistant',
|
||||
mockConfig
|
||||
);
|
||||
|
||||
await expect(async () => {
|
||||
for await (const _ of stream) {
|
||||
// Should throw before yielding
|
||||
}
|
||||
}).rejects.toThrow('No messages with content provided in history!');
|
||||
});
|
||||
});
|
||||
103
discord/__tests__/provider_command.test.ts
Normal file
103
discord/__tests__/provider_command.test.ts
Normal file
@@ -0,0 +1,103 @@
|
||||
/**
|
||||
* Tests for commands/config/provider.ts
|
||||
*/
|
||||
|
||||
jest.mock('../provider/mikuai', () => ({
|
||||
MikuAIProvider: jest.fn().mockImplementation(() => ({
|
||||
name: jest.fn().mockReturnValue('MikuAI'),
|
||||
requestLLMResponse: jest.fn(),
|
||||
setModel: jest.fn(),
|
||||
})),
|
||||
}));
|
||||
|
||||
jest.mock('../provider/huggingface', () => ({
|
||||
HuggingfaceProvider: jest.fn().mockImplementation(() => ({
|
||||
name: jest.fn().mockReturnValue('Huggingface'),
|
||||
requestLLMResponse: jest.fn(),
|
||||
setModel: jest.fn(),
|
||||
})),
|
||||
}));
|
||||
|
||||
jest.mock('../provider/openai', () => ({
|
||||
OpenAIProvider: jest.fn().mockImplementation(() => ({
|
||||
name: jest.fn().mockReturnValue('OpenAI'),
|
||||
requestLLMResponse: jest.fn(),
|
||||
setModel: jest.fn(),
|
||||
})),
|
||||
}));
|
||||
|
||||
jest.mock('../provider/ollama', () => ({
|
||||
OllamaProvider: jest.fn().mockImplementation(() => ({
|
||||
name: jest.fn().mockReturnValue('Ollama'),
|
||||
requestLLMResponse: jest.fn(),
|
||||
setModel: jest.fn(),
|
||||
})),
|
||||
}));
|
||||
|
||||
jest.mock('discord.js', () => {
|
||||
const actual = jest.requireActual('discord.js');
|
||||
return {
|
||||
...actual,
|
||||
SlashCommandBuilder: jest.fn().mockImplementation(() => ({
|
||||
setName: jest.fn().mockReturnThis(),
|
||||
setDescription: jest.fn().mockReturnThis(),
|
||||
addStringOption: jest.fn().mockReturnThis(),
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
const providerCommand = require('../commands/config/provider');
|
||||
|
||||
describe('provider command', () => {
|
||||
let mockInteraction: {
|
||||
user: { id: string };
|
||||
options: { getString: jest.Mock };
|
||||
reply: jest.Mock;
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env.ADMIN = '123456789012345678';
|
||||
mockInteraction = {
|
||||
user: { id: '123456789012345678' },
|
||||
options: { getString: jest.fn() },
|
||||
reply: jest.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
it('should have correct command data structure', () => {
|
||||
expect(providerCommand.data).toBeDefined();
|
||||
expect(providerCommand.execute).toBeDefined();
|
||||
expect(providerCommand.state).toBeDefined();
|
||||
});
|
||||
|
||||
it('should reject non-admin users', async () => {
|
||||
mockInteraction.user = { id: 'unauthorized-user' };
|
||||
|
||||
await providerCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalledWith(
|
||||
'You are not authorized to change model settings'
|
||||
);
|
||||
});
|
||||
|
||||
it('should accept admin users', async () => {
|
||||
mockInteraction.options.getString.mockImplementation((name: string) => {
|
||||
if (name === 'name') return 'openai';
|
||||
return null;
|
||||
});
|
||||
|
||||
await providerCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalled();
|
||||
const replyContent = mockInteraction.reply.mock.calls[0][0];
|
||||
expect(replyContent).toContain('Using provider');
|
||||
});
|
||||
|
||||
it('state function should return provider', () => {
|
||||
const state = providerCommand.state();
|
||||
expect(state).toBeDefined();
|
||||
expect(state.name).toBeDefined();
|
||||
expect(state.setModel).toBeDefined();
|
||||
});
|
||||
});
|
||||
18
discord/__tests__/setup.ts
Normal file
18
discord/__tests__/setup.ts
Normal file
@@ -0,0 +1,18 @@
|
||||
// Mock environment variables for tests
|
||||
process.env.TOKEN = 'test-token';
|
||||
process.env.CLIENT = '123456789012345678';
|
||||
process.env.ADMIN = '123456789012345678';
|
||||
process.env.GUILD = '123456789012345678';
|
||||
process.env.REACTIONS = '<:this:1171632205924151387>,<:that:1171632205924151388>,❤️';
|
||||
process.env.LLM_TOKEN = 'test-llm-token';
|
||||
process.env.LLM_HOST = 'http://test-llm-host';
|
||||
process.env.OPENAI_HOST = 'http://test-openai-host';
|
||||
process.env.REPLY_CHANCE = '0';
|
||||
process.env.ENABLE_MOTD = '1';
|
||||
process.env.ENABLE_THROWBACK = '1';
|
||||
process.env.MOTD_CHANNEL = '123456789012345678';
|
||||
process.env.THROWBACK_CHANNEL = '123456789012345678';
|
||||
process.env.LOSER_CHANNEL = '123456789012345678';
|
||||
process.env.RVC_HOST = 'http://test-rvc-host';
|
||||
process.env.MOTD_HREF = 'http://test-motd-href';
|
||||
process.env.MOTD_QUERY = '.motd';
|
||||
95
discord/__tests__/sysprompt.test.ts
Normal file
95
discord/__tests__/sysprompt.test.ts
Normal file
@@ -0,0 +1,95 @@
|
||||
/**
|
||||
* Tests for commands/config/sysprompt.ts
|
||||
*/
|
||||
|
||||
jest.mock('node:fs', () => ({
|
||||
readFileSync: jest.fn(() => 'Mock system prompt content'),
|
||||
writeFileSync: jest.fn(),
|
||||
existsSync: jest.fn(),
|
||||
}));
|
||||
|
||||
jest.mock('node:path', () => ({
|
||||
...jest.requireActual('node:path'),
|
||||
resolve: jest.fn((_, filename) => `/mock/path/${filename}`),
|
||||
}));
|
||||
|
||||
jest.mock('glob', () => ({
|
||||
globSync: jest.fn(() => ['/mock/path/sysprompt_cache/nous.txt']),
|
||||
}));
|
||||
|
||||
jest.mock('discord.js', () => {
|
||||
const actual = jest.requireActual('discord.js');
|
||||
return {
|
||||
...actual,
|
||||
SlashCommandBuilder: jest.fn().mockImplementation(() => ({
|
||||
setName: jest.fn().mockReturnThis(),
|
||||
setDescription: jest.fn().mockReturnThis(),
|
||||
addStringOption: jest.fn().mockReturnThis(),
|
||||
})),
|
||||
AttachmentBuilder: jest.fn().mockImplementation((buffer, options) => ({
|
||||
buffer,
|
||||
name: options?.name,
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
const syspromptCommand = require('../commands/config/sysprompt');
|
||||
|
||||
describe('sysprompt command', () => {
|
||||
let mockInteraction: {
|
||||
user: { id: string };
|
||||
options: { getString: jest.Mock };
|
||||
reply: jest.Mock;
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env.ADMIN = '123456789012345678';
|
||||
mockInteraction = {
|
||||
user: { id: '123456789012345678' },
|
||||
options: { getString: jest.fn() },
|
||||
reply: jest.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
it('should have correct command data structure', () => {
|
||||
expect(syspromptCommand.data).toBeDefined();
|
||||
expect(syspromptCommand.execute).toBeDefined();
|
||||
expect(syspromptCommand.state).toBeDefined();
|
||||
});
|
||||
|
||||
it('should reject non-admin users', async () => {
|
||||
mockInteraction.user = { id: 'unauthorized-user' };
|
||||
|
||||
await syspromptCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalledWith(
|
||||
'You are not authorized to change model settings'
|
||||
);
|
||||
});
|
||||
|
||||
it('should return current sysprompt for admin users', async () => {
|
||||
mockInteraction.options.getString.mockReturnValue(null);
|
||||
|
||||
await syspromptCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalled();
|
||||
const replyContent = mockInteraction.reply.mock.calls[0][0];
|
||||
expect(replyContent.content).toContain('Current system prompt');
|
||||
});
|
||||
|
||||
it('should handle unknown prompt name gracefully', async () => {
|
||||
mockInteraction.options.getString.mockReturnValue('nonexistent_prompt');
|
||||
|
||||
await syspromptCommand.execute(mockInteraction);
|
||||
|
||||
const replyContent = mockInteraction.reply.mock.calls[0][0];
|
||||
expect(replyContent.content).toContain('not found');
|
||||
});
|
||||
|
||||
it('state function should return sysprompt content', () => {
|
||||
const state = syspromptCommand.state();
|
||||
expect(typeof state).toBe('string');
|
||||
expect(state.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
94
discord/__tests__/tts.test.ts
Normal file
94
discord/__tests__/tts.test.ts
Normal file
@@ -0,0 +1,94 @@
|
||||
/**
|
||||
* Tests for commands/tts/tts.ts
|
||||
*/
|
||||
|
||||
jest.mock('discord.js', () => {
|
||||
const actual = jest.requireActual('discord.js');
|
||||
return {
|
||||
...actual,
|
||||
SlashCommandBuilder: jest.fn().mockImplementation(() => ({
|
||||
setName: jest.fn().mockReturnThis(),
|
||||
setDescription: jest.fn().mockReturnThis(),
|
||||
addStringOption: jest.fn().mockReturnThis(),
|
||||
})),
|
||||
AttachmentBuilder: jest.fn().mockImplementation((buffer, options) => ({
|
||||
buffer,
|
||||
name: options?.name,
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
jest.mock('../util', () => {
|
||||
const actual = jest.requireActual('../util');
|
||||
return {
|
||||
...actual,
|
||||
requestTTSResponse: jest.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
jest.mock('../../logging', () => ({
|
||||
logError: jest.fn(),
|
||||
logInfo: jest.fn(),
|
||||
logWarn: jest.fn(),
|
||||
}));
|
||||
|
||||
const ttsCommand = require('../commands/tts/tts');
|
||||
const { requestTTSResponse } = require('../util');
|
||||
|
||||
describe('tts command', () => {
|
||||
let mockInteraction: {
|
||||
options: { getString: jest.Mock };
|
||||
reply: jest.Mock;
|
||||
editReply: jest.Mock;
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
mockInteraction = {
|
||||
options: { getString: jest.fn() },
|
||||
reply: jest.fn(),
|
||||
editReply: jest.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
it('should have correct command data structure', () => {
|
||||
expect(ttsCommand.data).toBeDefined();
|
||||
expect(ttsCommand.data.setName).toBeDefined();
|
||||
expect(ttsCommand.execute).toBeDefined();
|
||||
expect(ttsCommand.config).toBeDefined();
|
||||
});
|
||||
|
||||
it('should generate TTS audio for valid text', async () => {
|
||||
mockInteraction.options.getString.mockReturnValue('Hello world');
|
||||
requestTTSResponse.mockResolvedValue({
|
||||
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
|
||||
});
|
||||
|
||||
await ttsCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalledWith(
|
||||
expect.stringContaining('generating audio for')
|
||||
);
|
||||
expect(requestTTSResponse).toHaveBeenCalledWith('Hello world');
|
||||
expect(mockInteraction.editReply).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should handle TTS generation errors', async () => {
|
||||
mockInteraction.options.getString.mockReturnValue('Hello world');
|
||||
requestTTSResponse.mockRejectedValue(new Error('TTS failed'));
|
||||
|
||||
await ttsCommand.execute(mockInteraction);
|
||||
|
||||
expect(mockInteraction.reply).toHaveBeenCalledWith(
|
||||
expect.stringContaining('generating audio for')
|
||||
);
|
||||
expect(mockInteraction.editReply).toHaveBeenCalledWith(expect.stringContaining('Error:'));
|
||||
});
|
||||
|
||||
it('should include TTS configuration', () => {
|
||||
expect(ttsCommand.config).toBeDefined();
|
||||
expect(ttsCommand.config.ttsSettings).toBeDefined();
|
||||
expect(ttsCommand.config.ttsSettings.pitch_change_oct).toBeDefined();
|
||||
expect(ttsCommand.config.ttsSettings.pitch_change_sem).toBeDefined();
|
||||
});
|
||||
});
|
||||
121
discord/__tests__/util.test.ts
Normal file
121
discord/__tests__/util.test.ts
Normal file
@@ -0,0 +1,121 @@
|
||||
/**
|
||||
* Tests for util.ts
|
||||
* Tests database operations, reaction handling, and message serialization
|
||||
*/
|
||||
|
||||
import { MessageReaction, User, Message, Attachment } from 'discord.js';
|
||||
import { openDb, recordReaction, serializeMessageHistory, REAL_NAMES } from '../util';
|
||||
|
||||
// Mock discord.js
|
||||
jest.mock('discord.js', () => {
|
||||
const actual = jest.requireActual('discord.js');
|
||||
return {
|
||||
...actual,
|
||||
MessageReaction: jest.fn(),
|
||||
User: jest.fn(),
|
||||
Message: jest.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
// Mock sqlite
|
||||
jest.mock('sqlite', () => ({
|
||||
open: jest.fn(() =>
|
||||
Promise.resolve({
|
||||
get: jest.fn(),
|
||||
run: jest.fn(),
|
||||
all: jest.fn(),
|
||||
close: jest.fn(),
|
||||
})
|
||||
),
|
||||
}));
|
||||
|
||||
jest.mock('sqlite3', () => ({
|
||||
Database: jest.fn(),
|
||||
}));
|
||||
|
||||
describe('util.ts', () => {
|
||||
beforeAll(async () => {
|
||||
await openDb();
|
||||
});
|
||||
|
||||
describe('REAL_NAMES', () => {
|
||||
it('should contain expected username mappings', () => {
|
||||
expect(REAL_NAMES.vinso1445).toBe('Vincent Iannelli');
|
||||
expect(REAL_NAMES.scoliono).toBe('James Shiffer');
|
||||
expect(REAL_NAMES.gnuwu).toBe('David Zheng');
|
||||
});
|
||||
|
||||
it('should include Hatsune Miku', () => {
|
||||
expect(REAL_NAMES['Hatsune Miku']).toBe('Hatsune Miku');
|
||||
});
|
||||
});
|
||||
|
||||
describe('serializeMessageHistory', () => {
|
||||
it('should return undefined for messages without content', async () => {
|
||||
const mockMessage = {
|
||||
cleanContent: '',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
type: 0,
|
||||
reactions: { cache: new Map() },
|
||||
} as unknown as Message;
|
||||
|
||||
const result = await serializeMessageHistory(mockMessage);
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should serialize a valid message with content', async () => {
|
||||
const mockDate = new Date('2024-01-01T00:00:00Z');
|
||||
const mockMessage = {
|
||||
cleanContent: 'Hello, world!',
|
||||
createdAt: mockDate,
|
||||
author: { username: 'testuser' },
|
||||
type: 0,
|
||||
reactions: { cache: new Map() },
|
||||
} as unknown as Message;
|
||||
|
||||
const result = await serializeMessageHistory(mockMessage);
|
||||
|
||||
expect(result).toBeDefined();
|
||||
expect(result?.content).toBe('Hello, world!');
|
||||
expect(result?.author).toBe('testuser');
|
||||
expect(result?.timestamp).toBe(mockDate.toUTCString());
|
||||
});
|
||||
|
||||
it('should include real name if available', async () => {
|
||||
const mockMessage = {
|
||||
cleanContent: 'Test message',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'vinso1445' },
|
||||
type: 0,
|
||||
reactions: { cache: new Map() },
|
||||
} as unknown as Message;
|
||||
|
||||
const result = await serializeMessageHistory(mockMessage);
|
||||
|
||||
expect(result?.name).toBe('Vincent Iannelli');
|
||||
});
|
||||
|
||||
it('should serialize reactions', async () => {
|
||||
const mockReaction = {
|
||||
emoji: { name: '👍' },
|
||||
count: 5,
|
||||
};
|
||||
const mockMessage = {
|
||||
cleanContent: 'Test',
|
||||
createdAt: new Date(),
|
||||
author: { username: 'testuser' },
|
||||
type: 0,
|
||||
reactions: {
|
||||
cache: new Map([['reaction1', mockReaction]]),
|
||||
},
|
||||
} as unknown as Message;
|
||||
|
||||
const result = await serializeMessageHistory(mockMessage);
|
||||
|
||||
expect(result?.reactions).toBeDefined();
|
||||
expect(result?.reactions).toContain(':+1:');
|
||||
expect(result?.reactions).toContain('(5)');
|
||||
});
|
||||
});
|
||||
});
|
||||
238
discord/bot.ts
238
discord/bot.ts
@@ -23,7 +23,7 @@ import {
|
||||
} from 'discord.js';
|
||||
import fs = require('node:fs');
|
||||
import path = require('node:path');
|
||||
import fetch from 'node-fetch';
|
||||
import fetch, { Blob as NodeFetchBlob } from 'node-fetch';
|
||||
import FormData = require('form-data');
|
||||
import tmp = require('tmp');
|
||||
import { JSDOM } from 'jsdom';
|
||||
@@ -40,7 +40,7 @@ import {
|
||||
} from './util';
|
||||
import 'dotenv/config';
|
||||
import { LLMConfig } from './commands/types';
|
||||
import { LLMProvider } from './provider/provider';
|
||||
import { LLMProvider, StreamingChunk } from './provider/provider';
|
||||
|
||||
interface State {
|
||||
llmconf?(): LLMConfig;
|
||||
@@ -49,6 +49,58 @@ interface State {
|
||||
}
|
||||
const state: State = {};
|
||||
|
||||
/**
|
||||
* Parse loading emojis from environment variable
|
||||
* Format: "<:clueless:123>,<a:hachune:456>,..."
|
||||
*/
|
||||
function parseLoadingEmojis(): string[] {
|
||||
const emojiStr = process.env.LOADING_EMOJIS || '';
|
||||
if (!emojiStr.trim()) {
|
||||
// Default fallback emojis if not configured
|
||||
return ['🤔', '✨', '🎵'];
|
||||
}
|
||||
return emojiStr
|
||||
.split(',')
|
||||
.map((e) => e.trim())
|
||||
.filter((e) => e.length > 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Pick a random loading emoji from the configured list
|
||||
*/
|
||||
function getRandomLoadingEmoji(): string {
|
||||
const emojis = parseLoadingEmojis();
|
||||
return emojis[Math.floor(Math.random() * emojis.length)];
|
||||
}
|
||||
|
||||
/**
|
||||
* Format the loading message with emoji and reasoning content
|
||||
*/
|
||||
function formatLoadingMessage(emoji: string, reasoning: string): string {
|
||||
const kawaiiPhrases = [
|
||||
'Hmm... let me think~ ♪',
|
||||
'Processing nyaa~',
|
||||
'Miku is thinking...',
|
||||
'Calculating with magic ✨',
|
||||
'Pondering desu~',
|
||||
'Umm... one moment! ♪',
|
||||
'Brain go brrr~',
|
||||
'Assembling thoughts... ♪',
|
||||
'Loading Miku-brain...',
|
||||
'Thinking hard senpai~',
|
||||
];
|
||||
const phrase = kawaiiPhrases[Math.floor(Math.random() * kawaiiPhrases.length)];
|
||||
|
||||
let content = `${emoji} ${phrase}`;
|
||||
if (reasoning && reasoning.trim().length > 0) {
|
||||
// Truncate reasoning if too long for display
|
||||
const displayReasoning =
|
||||
reasoning.length > 500 ? reasoning.slice(0, 500) + '...' : reasoning;
|
||||
content += `\n\n> ${displayReasoning}`;
|
||||
}
|
||||
return content;
|
||||
}
|
||||
|
||||
interface CommandClient extends Client {
|
||||
commands?: Collection<
|
||||
string,
|
||||
@@ -75,6 +127,8 @@ client.once(Events.ClientReady, async () => {
|
||||
const emojiName = emojiConfig.includes(':') ? emojiConfig.split(':')[1] : emojiConfig;
|
||||
logInfo(`[bot] util: reaction_${i + 1} = ${emojiName}`);
|
||||
}
|
||||
const loadingEmojis = parseLoadingEmojis();
|
||||
logInfo(`[bot] Loaded ${loadingEmojis.length} loading emojis: ${loadingEmojis.join(', ')}`);
|
||||
});
|
||||
|
||||
async function onMessageReactionChanged(
|
||||
@@ -176,21 +230,88 @@ async function onNewMessage(message: Message) {
|
||||
const cleanHistoryList = [...historyMessages, message];
|
||||
|
||||
try {
|
||||
if ('sendTyping' in message.channel) {
|
||||
await message.channel.sendTyping();
|
||||
}
|
||||
// Pick a random loading emoji for this generation
|
||||
const loadingEmoji = getRandomLoadingEmoji();
|
||||
|
||||
const response = await state.provider!().requestLLMResponse(
|
||||
cleanHistoryList,
|
||||
state.sysprompt!(),
|
||||
state.llmconf!()
|
||||
);
|
||||
// evaluate response
|
||||
if (!isGoodResponse(response)) {
|
||||
logWarn(`[bot] Burning bad response: "${response}"`);
|
||||
return;
|
||||
// Send initial loading message
|
||||
const loadingMsg = await message.reply(formatLoadingMessage(loadingEmoji, ''));
|
||||
|
||||
// Check if provider supports streaming
|
||||
const provider = state.provider!();
|
||||
if (provider.requestLLMResponseStreaming) {
|
||||
// Use streaming with reasoning updates
|
||||
let lastUpdateTime = Date.now();
|
||||
const updateIntervalMs = 3000; // Update every ~3 seconds
|
||||
let latestReasoning = '';
|
||||
let finalContent = '';
|
||||
|
||||
try {
|
||||
const stream = provider.requestLLMResponseStreaming(
|
||||
cleanHistoryList,
|
||||
state.sysprompt!(),
|
||||
state.llmconf!()
|
||||
);
|
||||
|
||||
for await (const chunk of stream) {
|
||||
// Update reasoning if present
|
||||
if (chunk.reasoning) {
|
||||
latestReasoning = chunk.reasoning;
|
||||
}
|
||||
|
||||
// Track final content
|
||||
if (chunk.content) {
|
||||
finalContent = chunk.content;
|
||||
}
|
||||
|
||||
// Update message periodically (only if reasoning changed and interval passed)
|
||||
const now = Date.now();
|
||||
if (latestReasoning && now - lastUpdateTime >= updateIntervalMs) {
|
||||
await loadingMsg.edit(formatLoadingMessage(loadingEmoji, latestReasoning));
|
||||
lastUpdateTime = now;
|
||||
}
|
||||
}
|
||||
|
||||
// Generation complete - check if we got stuck in reasoning
|
||||
if (latestReasoning && !isGoodResponse(finalContent)) {
|
||||
// Token budget exhausted during reasoning, never produced final content
|
||||
const errorMsg = 'Oops! I thought so hard I ran out of tokens... owo';
|
||||
logError(
|
||||
`[bot] Token budget exhausted during reasoning! Reasoning length: ${latestReasoning.length} chars, no final content produced.`
|
||||
);
|
||||
// Show the end of the reasoning trace (where it got stuck)
|
||||
const reasoningTail =
|
||||
latestReasoning.length > 300
|
||||
? '...' + latestReasoning.slice(-300)
|
||||
: latestReasoning;
|
||||
await loadingMsg.edit(
|
||||
`${loadingEmoji} ${errorMsg}\n\n*Reasoning trace (end):*\n> ${reasoningTail}`
|
||||
);
|
||||
} else if (isGoodResponse(finalContent)) {
|
||||
// Success - edit message with final response (no reasoning)
|
||||
await loadingMsg.edit(finalContent);
|
||||
} else {
|
||||
logWarn(`[bot] Burning bad response: "${finalContent}"`);
|
||||
await loadingMsg.delete();
|
||||
}
|
||||
} catch (streamErr) {
|
||||
logError(`[bot] Streaming error: ${streamErr}`);
|
||||
await loadingMsg.edit('Oops! Something went wrong while I was thinking... owo');
|
||||
}
|
||||
} else {
|
||||
// Fallback to non-streaming method
|
||||
const response = await provider.requestLLMResponse(
|
||||
cleanHistoryList,
|
||||
state.sysprompt!(),
|
||||
state.llmconf!()
|
||||
);
|
||||
|
||||
if (isGoodResponse(response)) {
|
||||
await loadingMsg.edit(response);
|
||||
} else {
|
||||
logWarn(`[bot] Burning bad response: "${response}"`);
|
||||
await loadingMsg.delete();
|
||||
}
|
||||
}
|
||||
await message.reply(response);
|
||||
} catch (err) {
|
||||
logError(`[bot] Error while generating LLM response: ${err}`);
|
||||
}
|
||||
@@ -209,14 +330,14 @@ async function fetchMotd() {
|
||||
}
|
||||
}
|
||||
|
||||
async function requestRVCResponse(src: Attachment): Promise<Blob> {
|
||||
async function requestRVCResponse(src: Attachment): Promise<NodeFetchBlob> {
|
||||
logInfo(`[bot] Downloading audio message ${src.url}`);
|
||||
const srcres = await fetch(src.url);
|
||||
const srcbuf = await srcres.arrayBuffer();
|
||||
const tmpFile = tmp.fileSync();
|
||||
const tmpFileName = tmpFile.name;
|
||||
fs.writeFileSync(tmpFileName, Buffer.from(srcbuf));
|
||||
logInfo(`[bot] Got audio file: ${srcbuf.size} bytes`);
|
||||
logInfo(`[bot] Got audio file: ${srcbuf.byteLength} bytes`);
|
||||
|
||||
const queryParams = new URLSearchParams();
|
||||
queryParams.append('token', process.env.LLM_TOKEN || '');
|
||||
@@ -224,7 +345,7 @@ async function requestRVCResponse(src: Attachment): Promise<Blob> {
|
||||
const fd = new FormData();
|
||||
fd.append('file', fs.readFileSync(tmpFileName), 'voice-message.ogg');
|
||||
|
||||
const rvcEndpoint = `${process.env.LLM_HOST}/rvc?${queryParams.toString()}`;
|
||||
const rvcEndpoint = `${process.env.RVC_HOST}/rvc?${queryParams.toString()}`;
|
||||
logInfo(`[bot] Requesting RVC response for ${src.id}`);
|
||||
const res = await fetch(rvcEndpoint, {
|
||||
method: 'POST',
|
||||
@@ -447,26 +568,23 @@ async function scheduleBiggestLoser(firstTime = false) {
|
||||
|
||||
if (biggestLosers.length > 0) {
|
||||
biggestLosers.sort();
|
||||
let streakCount = 1;
|
||||
const streakFile = path.join(__dirname, 'biggest_loser_streak.json');
|
||||
// Track individual streaks per person
|
||||
const streakFile = path.join(__dirname, 'biggest_loser_streaks.json');
|
||||
let streaks: Record<string, number> = {};
|
||||
if (fs.existsSync(streakFile)) {
|
||||
try {
|
||||
const streakData = JSON.parse(fs.readFileSync(streakFile, 'utf8'));
|
||||
const prevNames = Array.isArray(streakData.names)
|
||||
? streakData.names
|
||||
: [streakData.name];
|
||||
prevNames.sort();
|
||||
if (JSON.stringify(prevNames) === JSON.stringify(biggestLosers)) {
|
||||
streakCount = streakData.count + 1;
|
||||
}
|
||||
streaks = JSON.parse(fs.readFileSync(streakFile, 'utf8'));
|
||||
} catch (e) {
|
||||
logWarn(`[bot] Failed to read streak data: ${e}`);
|
||||
streaks = {};
|
||||
}
|
||||
}
|
||||
fs.writeFileSync(
|
||||
streakFile,
|
||||
JSON.stringify({ names: biggestLosers, count: streakCount })
|
||||
);
|
||||
// Update streaks: continue if this person was in yesterday's losers, otherwise reset to 1
|
||||
const newStreaks: Record<string, number> = {};
|
||||
for (const name of biggestLosers) {
|
||||
newStreaks[name] = (streaks[name] || 0) + 1;
|
||||
}
|
||||
fs.writeFileSync(streakFile, JSON.stringify(newStreaks));
|
||||
|
||||
const firstNames = biggestLosers.map((n) => n.split(' ')[0]);
|
||||
let joinedNames = firstNames[0];
|
||||
@@ -476,19 +594,56 @@ async function scheduleBiggestLoser(firstTime = false) {
|
||||
joinedNames = `${firstNames.slice(0, -1).join(', ')}, and ${firstNames[firstNames.length - 1]}`;
|
||||
}
|
||||
|
||||
const isAre = biggestLosers.length > 1 ? 'are' : 'is';
|
||||
const theyHave = biggestLosers.length > 1 ? 'They have' : 'They have';
|
||||
let declaration = `The biggest loser(s) of yesterday ${isAre} ${joinedNames} with only ${minCount} messages! ${theyHave} been the biggest loser(s) for ${streakCount} day(s) in a row.`;
|
||||
// Build message with individual streak info
|
||||
const isPlural = biggestLosers.length > 1;
|
||||
const loserWord = isPlural ? 'losers' : 'loser';
|
||||
const isAre = isPlural ? 'are' : 'is';
|
||||
let declaration: string;
|
||||
if (isPlural) {
|
||||
// For multiple losers, list each with their streak
|
||||
const streakParts = biggestLosers.map((name, idx) => {
|
||||
const firstName = firstNames[idx];
|
||||
const dayWord = newStreaks[name] === 1 ? 'day' : 'days';
|
||||
return `${firstName} (${newStreaks[name]} ${dayWord} in a row)`;
|
||||
});
|
||||
let streakDetails = streakParts[0];
|
||||
if (streakParts.length === 2) {
|
||||
streakDetails = `${streakParts[0]} and ${streakParts[1]}`;
|
||||
} else if (streakParts.length > 2) {
|
||||
streakDetails = `${streakParts.slice(0, -1).join(', ')}, and ${streakParts[streakParts.length - 1]}`;
|
||||
}
|
||||
declaration = `Yesterday's biggest ${loserWord} ${isAre} ${joinedNames} with only ${minCount} messages! Streaks: ${streakDetails}.`;
|
||||
} else {
|
||||
const dayWord = newStreaks[biggestLosers[0]] === 1 ? 'day' : 'days';
|
||||
declaration = `Yesterday's biggest ${loserWord} ${isAre} ${joinedNames} with only ${minCount} messages! They have been the biggest ${loserWord} for ${newStreaks[biggestLosers[0]]} ${dayWord} in a row.`;
|
||||
}
|
||||
|
||||
try {
|
||||
let pingTags: string[] = [];
|
||||
if (guild) {
|
||||
const members = await guild.members.fetch();
|
||||
// Build a reverse map from real name to Discord user IDs
|
||||
const realNameToUserIds = new Map<string, string[]>();
|
||||
for (const [username, realName] of Object.entries(REAL_NAMES)) {
|
||||
if (!realNameToUserIds.has(realName)) {
|
||||
realNameToUserIds.set(realName, []);
|
||||
}
|
||||
realNameToUserIds.get(realName)!.push(username);
|
||||
}
|
||||
|
||||
// Fetch members for the usernames we need to ping
|
||||
const usernamesToCheck = new Set<string>();
|
||||
for (const realName of biggestLosers) {
|
||||
const usernames = realNameToUserIds.get(realName);
|
||||
if (usernames) {
|
||||
usernames.forEach((u) => usernamesToCheck.add(u));
|
||||
}
|
||||
}
|
||||
|
||||
// Try to fetch members (with a shorter timeout to avoid hanging)
|
||||
const members = await guild.members.fetch({ time: 10000 });
|
||||
for (const [_, member] of members) {
|
||||
const realName = (REAL_NAMES as any)[member.user.username];
|
||||
if (realName && biggestLosers.includes(realName)) {
|
||||
// Make sure we only add one ping per real name if multiple accounts map to the same name
|
||||
// Actually it doesn't hurt to ping both, but checking uniqueness is nice:
|
||||
const username = member.user.username;
|
||||
if (usernamesToCheck.has(username)) {
|
||||
const tag = `<@${member.user.id}>`;
|
||||
if (!pingTags.includes(tag)) {
|
||||
pingTags.push(tag);
|
||||
@@ -505,6 +660,9 @@ async function scheduleBiggestLoser(firstTime = false) {
|
||||
|
||||
logInfo(`[bot] Declaring biggest loser: ${declaration}`);
|
||||
await channel.send(declaration);
|
||||
await channel.send(
|
||||
'https://tenor.com/view/klajumas-spit-skreplis-klajumas-skreplis-gif-13538828554330887910'
|
||||
);
|
||||
}
|
||||
} catch (err) {
|
||||
logError(`[bot] Error finding biggest loser: ${err}`);
|
||||
|
||||
@@ -4,11 +4,11 @@
|
||||
|
||||
create table messages
|
||||
(
|
||||
id integer
|
||||
id text
|
||||
constraint messages_pk
|
||||
primary key,
|
||||
guild integer not null,
|
||||
channel integer not null,
|
||||
guild text not null,
|
||||
channel text not null,
|
||||
author text not null,
|
||||
content text,
|
||||
reaction_1_count integer not null default 0,
|
||||
|
||||
4023
discord/package-lock.json
generated
4023
discord/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -18,7 +18,11 @@
|
||||
"tmp": "^0.2.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/jest": "^29.5.12",
|
||||
"@types/node-fetch": "^2.6.11",
|
||||
"jest": "^29.7.0",
|
||||
"prettier": "^3.5.3",
|
||||
"ts-jest": "^29.1.2",
|
||||
"typescript": "^5.2.2"
|
||||
},
|
||||
"scripts": {
|
||||
@@ -28,6 +32,8 @@
|
||||
"sync": "npm run build && node sync.js",
|
||||
"deploy": "npm run build && node deploy.js",
|
||||
"format": "prettier --write .",
|
||||
"format:check": "prettier --check ."
|
||||
"format:check": "prettier --check .",
|
||||
"test": "jest",
|
||||
"test:watch": "jest --watch"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -142,4 +142,73 @@ export class HuggingfaceProvider implements LLMProvider {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async *requestLLMResponseStreaming(
|
||||
history: Message[],
|
||||
sysprompt: string,
|
||||
params: LLMConfig
|
||||
): AsyncGenerator<{ reasoning?: string; content?: string; done?: boolean }, string, unknown> {
|
||||
let messageList = await Promise.all(history.map(serializeMessageHistory));
|
||||
messageList = messageList.filter((x) => !!x);
|
||||
|
||||
if (messageList.length === 0) {
|
||||
throw new TypeError('No messages with content provided in history!');
|
||||
}
|
||||
|
||||
const lastMsg = messageList[messageList.length - 1];
|
||||
let newDate = new Date(lastMsg!.timestamp);
|
||||
newDate.setSeconds(newDate.getSeconds() + 5);
|
||||
|
||||
let templateMsgTxt = JSON.stringify({
|
||||
timestamp: newDate.toUTCString(),
|
||||
author: 'Hatsune Miku',
|
||||
name: 'Hatsune Miku',
|
||||
context: lastMsg!.content,
|
||||
content: '...',
|
||||
});
|
||||
|
||||
const messageHistoryTxt =
|
||||
messageList.map((msg) => JSON.stringify(msg)).join('\n') + '\n' + templateMsgTxt;
|
||||
logInfo(`[hf] Requesting streaming response for message history: ${messageHistoryTxt}`);
|
||||
|
||||
try {
|
||||
const stream = this.client.chatCompletionStream({
|
||||
model: this.model,
|
||||
messages: [
|
||||
{ role: 'system', content: sysprompt },
|
||||
{ role: 'user', content: USER_PROMPT + messageHistoryTxt },
|
||||
],
|
||||
temperature: params?.temperature || 0.5,
|
||||
top_p: params?.top_p || 0.9,
|
||||
max_tokens: params?.max_new_tokens || 128,
|
||||
});
|
||||
|
||||
let fullContent = '';
|
||||
let reasoningContent = '';
|
||||
|
||||
for await (const chunk of stream) {
|
||||
const delta = chunk.choices?.[0]?.delta?.content || '';
|
||||
|
||||
// Handle reasoning content if present
|
||||
if (
|
||||
'reasoning_content' in chunk.choices?.[0]?.delta &&
|
||||
chunk.choices[0].delta.reasoning_content
|
||||
) {
|
||||
reasoningContent += chunk.choices[0].delta.reasoning_content;
|
||||
yield { reasoning: reasoningContent };
|
||||
}
|
||||
|
||||
if (delta) {
|
||||
fullContent += delta;
|
||||
yield { content: fullContent };
|
||||
}
|
||||
}
|
||||
|
||||
logInfo(`[hf] Streaming API response: ${fullContent}`);
|
||||
return fullContent;
|
||||
} catch (err) {
|
||||
logError(`[hf] Streaming API Error: ` + err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,4 +97,66 @@ export class OllamaProvider implements LLMProvider {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async *requestLLMResponseStreaming(
|
||||
history: Message[],
|
||||
sysprompt: string,
|
||||
params: LLMConfig
|
||||
): AsyncGenerator<{ reasoning?: string; content?: string; done?: boolean }, string, unknown> {
|
||||
let messageList = await Promise.all(history.map(serializeMessageHistory));
|
||||
messageList = messageList.filter((x) => !!x);
|
||||
|
||||
if (messageList.length === 0) {
|
||||
throw new TypeError('No messages with content provided in history!');
|
||||
}
|
||||
|
||||
const lastMsg = messageList[messageList.length - 1];
|
||||
let newDate = new Date(lastMsg!.timestamp);
|
||||
newDate.setSeconds(newDate.getSeconds() + 5);
|
||||
|
||||
let templateMsgTxt = JSON.stringify({
|
||||
timestamp: newDate.toUTCString(),
|
||||
author: 'Hatsune Miku',
|
||||
name: 'Hatsune Miku',
|
||||
context: lastMsg!.content,
|
||||
content: '...',
|
||||
});
|
||||
|
||||
const messageHistoryTxt =
|
||||
messageList.map((msg) => JSON.stringify(msg)).join('\n') + '\n' + templateMsgTxt;
|
||||
logInfo(`[ollama] Requesting streaming response for message history: ${messageHistoryTxt}`);
|
||||
|
||||
try {
|
||||
const stream = await this.client.chat({
|
||||
model: this.model,
|
||||
messages: [
|
||||
{ role: 'system', content: sysprompt },
|
||||
{ role: 'user', content: USER_PROMPT + messageHistoryTxt },
|
||||
],
|
||||
stream: true,
|
||||
options: {
|
||||
temperature: params?.temperature || 0.5,
|
||||
top_p: params?.top_p || 0.9,
|
||||
num_predict: params?.max_new_tokens || 128,
|
||||
},
|
||||
});
|
||||
|
||||
let fullContent = '';
|
||||
|
||||
for await (const chunk of stream) {
|
||||
const messageContent = chunk.message?.content || '';
|
||||
|
||||
if (messageContent) {
|
||||
fullContent += messageContent;
|
||||
yield { content: fullContent };
|
||||
}
|
||||
}
|
||||
|
||||
logInfo(`[ollama] Streaming API response: ${fullContent}`);
|
||||
return fullContent;
|
||||
} catch (err) {
|
||||
logError(`[ollama] Streaming API Error: ` + err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,19 +86,91 @@ export class OpenAIProvider implements LLMProvider {
|
||||
});
|
||||
|
||||
let content = response.choices[0].message.content;
|
||||
if (!content) {
|
||||
throw new TypeError('OpenAI API returned no message.');
|
||||
}
|
||||
if (content.lastIndexOf('</think>') > -1) {
|
||||
content = content.slice(content.lastIndexOf('</think>') + 8);
|
||||
}
|
||||
logInfo(`[openai] API response: ${content}`);
|
||||
|
||||
if (!content) {
|
||||
throw new TypeError('OpenAI API returned no message.');
|
||||
}
|
||||
|
||||
return content;
|
||||
} catch (err) {
|
||||
logError(`[openai] API Error: ` + err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async *requestLLMResponseStreaming(
|
||||
history: Message[],
|
||||
sysprompt: string,
|
||||
params: LLMConfig
|
||||
): AsyncGenerator<{ reasoning?: string; content?: string; done?: boolean }, string, unknown> {
|
||||
let messageList = await Promise.all(history.map(serializeMessageHistory));
|
||||
messageList = messageList.filter((x) => !!x);
|
||||
|
||||
if (messageList.length === 0) {
|
||||
throw new TypeError('No messages with content provided in history!');
|
||||
}
|
||||
|
||||
const lastMsg = messageList[messageList.length - 1];
|
||||
let newDate = new Date(lastMsg!.timestamp);
|
||||
newDate.setSeconds(newDate.getSeconds() + 5);
|
||||
|
||||
let templateMsgTxt = JSON.stringify({
|
||||
timestamp: newDate.toUTCString(),
|
||||
author: 'Hatsune Miku',
|
||||
name: 'Hatsune Miku',
|
||||
context: lastMsg!.content,
|
||||
content: '...',
|
||||
});
|
||||
|
||||
const messageHistoryTxt =
|
||||
messageList.map((msg) => JSON.stringify(msg)).join('\n') + '\n' + templateMsgTxt;
|
||||
logInfo(`[openai] Requesting streaming response for message history: ${messageHistoryTxt}`);
|
||||
|
||||
try {
|
||||
const stream = await this.client.chat.completions.create({
|
||||
model: this.model,
|
||||
messages: [
|
||||
{ role: 'system', content: sysprompt },
|
||||
{ role: 'user', content: USER_PROMPT + messageHistoryTxt },
|
||||
],
|
||||
temperature: params?.temperature || 0.5,
|
||||
top_p: params?.top_p || 0.9,
|
||||
max_tokens: params?.max_new_tokens || 128,
|
||||
stream: true,
|
||||
});
|
||||
|
||||
let fullContent = '';
|
||||
let reasoningContent = '';
|
||||
|
||||
for await (const chunk of stream) {
|
||||
const delta = chunk.choices[0]?.delta;
|
||||
|
||||
// Handle reasoning content if present (some models include it)
|
||||
if ('reasoning_content' in delta && delta.reasoning_content) {
|
||||
reasoningContent += delta.reasoning_content;
|
||||
yield { reasoning: reasoningContent };
|
||||
}
|
||||
|
||||
// Handle regular content
|
||||
if (delta.content) {
|
||||
fullContent += delta.content;
|
||||
yield { content: fullContent };
|
||||
}
|
||||
}
|
||||
|
||||
// Strip </think> tags if present
|
||||
if (fullContent.lastIndexOf('</think>') > -1) {
|
||||
fullContent = fullContent.slice(fullContent.lastIndexOf('</think>') + 8);
|
||||
}
|
||||
|
||||
logInfo(`[openai] Streaming API response: ${fullContent}`);
|
||||
return fullContent;
|
||||
} catch (err) {
|
||||
logError(`[openai] Streaming API Error: ` + err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,21 @@
|
||||
import { Message } from 'discord.js';
|
||||
import { LLMConfig } from '../commands/types';
|
||||
|
||||
/**
 * One incremental update yielded by a provider's streaming response
 * generator. Providers yield the ACCUMULATED text so far (not just the
 * newest token), so consumers can replace their display on every chunk.
 */
export interface StreamingChunk {
  // Accumulated chain-of-thought ("thinking") text, when the model emits it.
  reasoning?: string;
  // Accumulated visible reply text so far.
  content?: string;
  // Completion flag — presumably set on the final chunk; the providers
  // visible in this file do not populate it. TODO confirm against callers.
  done?: boolean;
}
|
||||
|
||||
export interface LLMProvider {
|
||||
name(): string;
|
||||
requestLLMResponse(history: Message[], sysprompt: string, params: LLMConfig): Promise<string>;
|
||||
setModel(id: string);
|
||||
requestLLMResponseStreaming?(
|
||||
history: Message[],
|
||||
sysprompt: string,
|
||||
params: LLMConfig
|
||||
): AsyncGenerator<StreamingChunk, string, unknown>;
|
||||
setModel(id: string): void;
|
||||
}
|
||||
|
||||
export interface LLMDiscordMessage {
|
||||
|
||||
12
discord/tsconfig.json
Normal file
12
discord/tsconfig.json
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"module": "commonjs",
|
||||
"target": "es2020",
|
||||
"sourceMap": true,
|
||||
"esModuleInterop": true,
|
||||
"skipLibCheck": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"types": ["jest", "node"]
|
||||
},
|
||||
"exclude": ["node_modules", "__tests__"]
|
||||
}
|
||||
@@ -18,7 +18,7 @@ import { get as httpGet } from 'https';
|
||||
import { Database, open } from 'sqlite';
|
||||
import { Database as Database3 } from 'sqlite3';
|
||||
import 'dotenv/config';
|
||||
import fetch from 'node-fetch';
|
||||
import fetch, { Blob as NodeFetchBlob } from 'node-fetch';
|
||||
import { logError, logInfo, logWarn } from '../logging';
|
||||
import { ScoreboardMessageRow } from '../models';
|
||||
import { LLMDiscordMessage } from './provider/provider';
|
||||
@@ -258,7 +258,7 @@ async function sync(guilds: GuildManager) {
|
||||
}
|
||||
}
|
||||
|
||||
async function requestTTSResponse(txt: string): Promise<Blob> {
|
||||
async function requestTTSResponse(txt: string): Promise<NodeFetchBlob> {
|
||||
const queryParams = new URLSearchParams();
|
||||
queryParams.append('token', process.env.LLM_TOKEN);
|
||||
queryParams.append('text', txt);
|
||||
|
||||
@@ -4,9 +4,9 @@
|
||||
*/
|
||||
|
||||
interface ScoreboardMessageRow {
|
||||
id: number;
|
||||
guild: number;
|
||||
channel: number;
|
||||
id: string;
|
||||
guild: string;
|
||||
channel: string;
|
||||
author: string;
|
||||
content: string;
|
||||
reaction_1_count: number;
|
||||
|
||||
@@ -4,5 +4,5 @@
|
||||
"target": "es2020",
|
||||
"sourceMap": true
|
||||
},
|
||||
"exclude": ["discord/node_modules"]
|
||||
"exclude": ["discord/node_modules", "discord/__tests__"]
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user