Test coverage reports; voicemsg command; debug command; refactoring

This commit is contained in:
2026-03-01 18:18:18 -08:00
parent e48e74c20e
commit 15cffb3b66
20 changed files with 3884 additions and 483 deletions

55
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
# CI workflow: build the discord bot and run its test suite with coverage
# on every push / PR to main or master, across three Node.js LTS lines.
# Coverage is uploaded both to Codecov and as a downloadable artifact.
name: Test and Coverage

on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

jobs:
  test:
    runs-on: ubuntu-latest
    defaults:
      run:
        # The bot lives in the discord/ subdirectory of the repo.
        working-directory: ./discord
    strategy:
      matrix:
        node-version: [18.x, 20.x, 22.x]
    steps:
      - uses: actions/checkout@v4
      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v4
        with:
          node-version: ${{ matrix.node-version }}
          cache: 'npm'
          cache-dependency-path: discord/package-lock.json
      - name: Install dependencies
        run: npm ci
      - name: Build TypeScript
        # Build failures are deliberately non-fatal: tests run against ts-jest,
        # so type errors surface there rather than blocking the job here.
        run: npm run build || echo "Build warnings - continuing with tests"
      - name: Run tests with coverage
        run: npm run test:ci
      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v4
        with:
          # 'uses' steps ignore defaults.run.working-directory, so this path
          # is relative to the repository root.
          files: ./discord/coverage/lcov.info
          flags: discord-bot
          name: discord-bot-coverage
          fail_ci_if_error: false
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      - name: Upload coverage artifact
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report-node-${{ matrix.node-version }}
          path: discord/coverage/
          retention-days: 7

14
discord/.c8rc.json Normal file
View File

@@ -0,0 +1,14 @@
{
"all": true,
"include": ["commands/**/*.js", "provider/**/*.js", "util.js", "bot.js", "logging.js"],
"exclude": ["**/__tests__/**", "**/*.d.ts", "deploy.js", "sync.js", "node_modules"],
"reporter": ["text", "lcov", "html"],
"reports-dir": "./coverage",
"tempDirectory": "./coverage/tmp",
"clean": true,
"check-coverage": true,
"lines": 40,
"functions": 40,
"branches": 40,
"statements": 40
}

View File

@@ -1,9 +1,11 @@
TOKEN="sadkfl;jasdkl;fj"
REACTIONS="💀,💯,😭,<:based:1178222955830968370>,<:this:1171632205924151387>"
CLIENT="123456789012345678"
GUILD="123456789012345678"
ADMIN="123456789012345678"
# Comma-separated list of guild IDs to count reactions from
REACTION_GUILDS="123456789012345678,876543210987654321"
# Custom emojis for loading states (format: <a:name:id> or <:name:id>)
LOADING_EMOJIS="<:clueless:1476853248135790643>,<a:hachune:1476838658169503878>,<a:chairspin:1476838586929119234>,<a:nekodance:1476838199019049056>"

2
discord/.gitignore vendored
View File

@@ -1 +1,3 @@
db.sqlite
biggest_loser_streaks.json

8
discord/.prettierignore Normal file
View File

@@ -0,0 +1,8 @@
node_modules
dist
build
*.js
*.d.ts
coverage
.vscode
.idea

8
discord/.prettierrc.json Normal file
View File

@@ -0,0 +1,8 @@
{
"semi": true,
"singleQuote": true,
"tabWidth": 4,
"trailingComma": "es5",
"printWidth": 100,
"arrowParens": "always"
}

View File

@@ -29,42 +29,16 @@ jest.mock('fs', () => ({
existsSync: jest.fn(),
}));
// Mock environment variables
const mockEnv = {
LOADING_EMOJIS: '<:clueless:123>,<a:hachune:456>,<a:chairspin:789>,<a:nekodance:012>',
};
// Helper functions for testing
function parseLoadingEmojis(): string[] {
const emojiStr = mockEnv.LOADING_EMOJIS || '';
if (!emojiStr.trim()) {
return ['🤔', '✨', '🎵'];
}
return emojiStr
.split(',')
.map((e) => e.trim())
.filter((e) => e.length > 0);
}
function getRandomLoadingEmoji(): string {
const emojis = parseLoadingEmojis();
return emojis[Math.floor(Math.random() * emojis.length)];
}
// Import helper functions from shared module
const {
parseLoadingEmojis,
getRandomLoadingEmoji,
KAWAII_PHRASES,
createStatusEmbed,
} = require('../commands/helpers');
function formatLoadingMessage(emoji: string, reasoning: string): string {
const kawaiiPhrases = [
'Hmm... let me think~ ♪',
'Processing nyaa~',
'Miku is thinking...',
'Calculating with magic ✨',
'Pondering desu~',
'Umm... one moment! ♪',
'Brain go brrr~',
'Assembling thoughts... ♪',
'Loading Miku-brain...',
'Thinking hard senpai~',
];
const phrase = kawaiiPhrases[Math.floor(Math.random() * kawaiiPhrases.length)];
const phrase = KAWAII_PHRASES[Math.floor(Math.random() * KAWAII_PHRASES.length)];
let content = `${emoji}\n${phrase}`;
if (reasoning && reasoning.trim().length > 0) {
@@ -191,7 +165,11 @@ describe('bot.ts helper functions', () => {
describe('parseLoadingEmojis', () => {
it('should parse emojis from environment variable', () => {
const original = process.env.LOADING_EMOJIS;
process.env.LOADING_EMOJIS =
'<:clueless:123>,<a:hachune:456>,<a:chairspin:789>,<a:nekodance:012>';
const result = parseLoadingEmojis();
process.env.LOADING_EMOJIS = original;
expect(result).toHaveLength(4);
expect(result).toEqual([
'<:clueless:123>',
@@ -202,18 +180,18 @@ describe('bot.ts helper functions', () => {
});
it('should return default emojis when LOADING_EMOJIS is empty', () => {
const original = mockEnv.LOADING_EMOJIS;
mockEnv.LOADING_EMOJIS = '';
const original = process.env.LOADING_EMOJIS;
process.env.LOADING_EMOJIS = '';
const result = parseLoadingEmojis();
mockEnv.LOADING_EMOJIS = original;
process.env.LOADING_EMOJIS = original;
expect(result).toEqual(['🤔', '✨', '🎵']);
});
it('should handle whitespace in emoji list', () => {
const original = mockEnv.LOADING_EMOJIS;
mockEnv.LOADING_EMOJIS = ' <:test:123> , <a:spin:456> ';
const original = process.env.LOADING_EMOJIS;
process.env.LOADING_EMOJIS = ' <:test:123> , <a:spin:456> ';
const result = parseLoadingEmojis();
mockEnv.LOADING_EMOJIS = original;
process.env.LOADING_EMOJIS = original;
expect(result).toEqual(['<:test:123>', '<a:spin:456>']);
});
});

View File

@@ -344,3 +344,218 @@ describe('OpenAIProvider streaming', () => {
}).rejects.toThrow('No messages with content provided in history!');
});
});
describe('OpenAIProvider structured voice response', () => {
const mockConfig: LLMConfig = {
max_new_tokens: 256,
min_new_tokens: 1,
temperature: 0.7,
top_p: 0.9,
frequency_penalty: 0.0,
presence_penalty: 0.0,
msg_context: 8,
};
beforeEach(() => {
jest.clearAllMocks();
process.env.LLM_TOKEN = 'test-token';
process.env.OPENAI_HOST = 'http://test-host';
mockCreate.mockReset();
});
it('should request structured voice response successfully', async () => {
mockCreate.mockResolvedValue({
choices: [
{
message: {
content: JSON.stringify({
message: 'Hello! Nice to meet you~ ♪',
instruct: 'Speak cheerfully and energetically',
}),
},
},
],
});
const provider = new OpenAIProvider('test-token', 'gpt-4');
const response = await provider.requestStructuredVoiceResponse(
'Hello Miku!',
'You are Miku',
mockConfig
);
expect(response).toEqual({
message: 'Hello! Nice to meet you~ ♪',
instruct: 'Speak cheerfully and energetically',
});
expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
response_format: { type: 'json_object' },
})
);
});
it('should use json_object response format', async () => {
mockCreate.mockResolvedValue({
choices: [
{
message: {
content: '{"message": "Test", "instruct": "Speak normally"}',
},
},
],
});
const provider = new OpenAIProvider('test-token', 'gpt-4');
await provider.requestStructuredVoiceResponse('Test message', 'You are Miku', mockConfig);
const callArgs = mockCreate.mock.calls[0][0];
expect(callArgs.response_format).toEqual({ type: 'json_object' });
});
it('should handle empty response from API', async () => {
mockCreate.mockResolvedValue({
choices: [
{
message: {
content: '',
},
},
],
});
const provider = new OpenAIProvider('test-token', 'gpt-4');
await expect(
provider.requestStructuredVoiceResponse('Test', 'You are Miku', mockConfig)
).rejects.toThrow('OpenAI API returned no message.');
});
it('should use default message when message field is missing', async () => {
mockCreate.mockResolvedValue({
choices: [
{
message: {
content: JSON.stringify({
instruct: 'Speak happily',
}),
},
},
],
});
const provider = new OpenAIProvider('test-token', 'gpt-4');
const response = await provider.requestStructuredVoiceResponse(
'Test',
'You are Miku',
mockConfig
);
expect(response.message).toBe('Hello! I am Miku~ ♪');
expect(response.instruct).toBe('Speak happily');
});
it('should use default instruct when instruct field is missing', async () => {
mockCreate.mockResolvedValue({
choices: [
{
message: {
content: JSON.stringify({
message: 'Hello there!',
}),
},
},
],
});
const provider = new OpenAIProvider('test-token', 'gpt-4');
const response = await provider.requestStructuredVoiceResponse(
'Test',
'You are Miku',
mockConfig
);
expect(response.message).toBe('Hello there!');
expect(response.instruct).toBe('Speak in a friendly and enthusiastic tone');
});
it('should handle malformed JSON response', async () => {
mockCreate.mockResolvedValue({
choices: [
{
message: {
content: 'Not valid JSON at all',
},
},
],
});
const provider = new OpenAIProvider('test-token', 'gpt-4');
await expect(
provider.requestStructuredVoiceResponse('Test', 'You are Miku', mockConfig)
).rejects.toThrow();
});
it('should use default parameters when config not provided', async () => {
mockCreate.mockResolvedValue({
choices: [
{
message: {
content: JSON.stringify({
message: 'Response with defaults',
instruct: 'Speak normally',
}),
},
},
],
});
const provider = new OpenAIProvider('test-token', 'gpt-4');
await provider.requestStructuredVoiceResponse('Test', 'You are Miku', {} as LLMConfig);
expect(mockCreate).toHaveBeenCalledWith(
expect.objectContaining({
temperature: 0.7,
top_p: 0.9,
max_tokens: 256,
})
);
});
it('should log API response', async () => {
const { logInfo } = require('../../logging');
mockCreate.mockResolvedValue({
choices: [
{
message: {
content: JSON.stringify({
message: 'Hello!',
instruct: 'Speak happily',
}),
},
},
],
});
const provider = new OpenAIProvider('test-token', 'gpt-4');
await provider.requestStructuredVoiceResponse('Test', 'You are Miku', mockConfig);
expect(logInfo).toHaveBeenCalledWith(expect.stringContaining('Structured API response:'));
});
it('should log errors', async () => {
const { logError } = require('../../logging');
mockCreate.mockRejectedValue(new Error('API error'));
const provider = new OpenAIProvider('test-token', 'gpt-4');
try {
await provider.requestStructuredVoiceResponse('Test', 'You are Miku', mockConfig);
} catch (e) {
// Expected
}
expect(logError).toHaveBeenCalledWith(expect.stringContaining('Structured API Error:'));
});
});

View File

@@ -10,11 +10,22 @@ jest.mock('discord.js', () => {
setName: jest.fn().mockReturnThis(),
setDescription: jest.fn().mockReturnThis(),
addStringOption: jest.fn().mockReturnThis(),
addIntegerOption: jest.fn().mockReturnThis(),
})),
AttachmentBuilder: jest.fn().mockImplementation((buffer, options) => ({
buffer,
name: options?.name,
EmbedBuilder: jest.fn().mockImplementation(() => ({
setColor: jest.fn().mockReturnThis(),
setAuthor: jest.fn().mockReturnThis(),
setDescription: jest.fn().mockReturnThis(),
setFooter: jest.fn().mockReturnThis(),
setTimestamp: jest.fn().mockReturnThis(),
})),
AttachmentBuilder: jest.fn().mockImplementation((buffer, options) => {
const file = { buffer, name: options?.name };
return {
...file,
setName: jest.fn().mockReturnThis(),
};
}),
};
});
@@ -37,7 +48,7 @@ const { requestTTSResponse } = require('../util');
describe('tts command', () => {
let mockInteraction: {
options: { getString: jest.Mock };
options: { getString: jest.Mock; getInteger: jest.Mock };
reply: jest.Mock;
editReply: jest.Mock;
};
@@ -45,7 +56,7 @@ describe('tts command', () => {
beforeEach(() => {
jest.clearAllMocks();
mockInteraction = {
options: { getString: jest.fn() },
options: { getString: jest.fn(), getInteger: jest.fn() },
reply: jest.fn(),
editReply: jest.fn(),
};
@@ -58,37 +69,130 @@ describe('tts command', () => {
expect(ttsCommand.config).toBeDefined();
});
it('should generate TTS audio for valid text', async () => {
mockInteraction.options.getString.mockReturnValue('Hello world');
it('should generate TTS audio for valid text with default options', async () => {
mockInteraction.options.getString.mockImplementation((name: string) => {
if (name === 'text') return 'Hello world';
if (name === 'speaker') return null;
if (name === 'instruct') return null;
return null;
});
mockInteraction.options.getInteger.mockReturnValue(null);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await ttsCommand.execute(mockInteraction);
expect(mockInteraction.reply).toHaveBeenCalledWith(
expect.stringContaining('generating audio for')
// Should reply with loading embed
expect(mockInteraction.reply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
expect(requestTTSResponse).toHaveBeenCalledWith('Hello world', 'Ono_Anna', 0, null);
// Should edit with final embed and audio file
expect(mockInteraction.editReply).toHaveBeenCalledTimes(1);
const editCall = mockInteraction.editReply.mock.calls[0][0];
expect(editCall.embeds).toBeDefined();
expect(editCall.files).toBeDefined();
expect(editCall.files.length).toBeGreaterThan(0);
});
it('should generate TTS audio with custom speaker', async () => {
mockInteraction.options.getString.mockImplementation((name: string) => {
if (name === 'text') return 'Hello world';
if (name === 'speaker') return 'Miku';
if (name === 'instruct') return null;
return null;
});
mockInteraction.options.getInteger.mockReturnValue(null);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await ttsCommand.execute(mockInteraction);
expect(requestTTSResponse).toHaveBeenCalledWith('Hello world', 'Miku', 0, null);
});
it('should generate TTS audio with custom pitch', async () => {
mockInteraction.options.getString.mockImplementation((name: string) => {
if (name === 'text') return 'Hello world';
if (name === 'speaker') return null;
if (name === 'instruct') return null;
return null;
});
mockInteraction.options.getInteger.mockReturnValue(12);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await ttsCommand.execute(mockInteraction);
expect(requestTTSResponse).toHaveBeenCalledWith('Hello world', 'Ono_Anna', 12, null);
});
it('should generate TTS audio with instruction', async () => {
mockInteraction.options.getString.mockImplementation((name: string) => {
if (name === 'text') return 'Hello world';
if (name === 'speaker') return null;
if (name === 'instruct') return 'speak softly';
return null;
});
mockInteraction.options.getInteger.mockReturnValue(null);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await ttsCommand.execute(mockInteraction);
expect(requestTTSResponse).toHaveBeenCalledWith(
'Hello world',
'Ono_Anna',
0,
'speak softly'
);
expect(requestTTSResponse).toHaveBeenCalledWith('Hello world');
expect(mockInteraction.editReply).toHaveBeenCalled();
});
it('should generate TTS audio with all custom options', async () => {
mockInteraction.options.getString.mockImplementation((name: string) => {
if (name === 'text') return 'Hello world';
if (name === 'speaker') return 'Miku';
if (name === 'instruct') return 'speak softly';
return null;
});
mockInteraction.options.getInteger.mockReturnValue(0);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await ttsCommand.execute(mockInteraction);
expect(requestTTSResponse).toHaveBeenCalledWith('Hello world', 'Miku', 0, 'speak softly');
});
it('should handle TTS generation errors', async () => {
mockInteraction.options.getString.mockReturnValue('Hello world');
mockInteraction.options.getString.mockImplementation((name: string) => {
if (name === 'text') return 'Hello world';
return null;
});
mockInteraction.options.getInteger.mockReturnValue(null);
requestTTSResponse.mockRejectedValue(new Error('TTS failed'));
await ttsCommand.execute(mockInteraction);
expect(mockInteraction.reply).toHaveBeenCalledWith(
expect.stringContaining('generating audio for')
);
expect(mockInteraction.editReply).toHaveBeenCalledWith(expect.stringContaining('Error:'));
// Should reply with loading embed
expect(mockInteraction.reply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
// Should edit with error embed
expect(mockInteraction.editReply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
});
it('should include TTS configuration', () => {
expect(ttsCommand.config).toBeDefined();
expect(ttsCommand.config.ttsSettings).toBeDefined();
expect(ttsCommand.config.ttsSettings.pitch_change_oct).toBeDefined();
expect(ttsCommand.config.ttsSettings.speaker).toBeDefined();
expect(ttsCommand.config.ttsSettings.pitch_change_sem).toBeDefined();
});
});

View File

@@ -0,0 +1,411 @@
/**
* Tests for commands/voicemsg/voicemsg.ts
*/
jest.mock('discord.js', () => {
const actual = jest.requireActual('discord.js');
return {
...actual,
SlashCommandBuilder: jest.fn().mockImplementation(() => ({
setName: jest.fn().mockReturnThis(),
setDescription: jest.fn().mockReturnThis(),
addStringOption: jest.fn().mockReturnThis(),
})),
EmbedBuilder: jest.fn().mockImplementation(() => ({
setColor: jest.fn().mockReturnThis(),
setAuthor: jest.fn().mockReturnThis(),
setDescription: jest.fn().mockReturnThis(),
setFooter: jest.fn().mockReturnThis(),
setTimestamp: jest.fn().mockReturnThis(),
})),
AttachmentBuilder: jest.fn().mockImplementation((buffer, options) => {
const file = { buffer, name: options?.name };
return file;
}),
};
});
jest.mock('../util', () => {
const actual = jest.requireActual('../util');
return {
...actual,
requestTTSResponse: jest.fn(),
};
});
jest.mock('../../logging', () => ({
logError: jest.fn(),
logInfo: jest.fn(),
logWarn: jest.fn(),
}));
const voicemsgModule = require('../commands/voicemsg/voicemsg');
const voicemsgCommand = voicemsgModule.default || voicemsgModule;
const { requestTTSResponse } = require('../util');
const { parseLoadingEmojis, getRandomLoadingEmoji } = require('../commands/helpers');
describe('voicemsg helper functions', () => {
describe('parseLoadingEmojis', () => {
afterEach(() => {
delete process.env.LOADING_EMOJIS;
});
it('should parse emojis from environment variable', () => {
process.env.LOADING_EMOJIS = '<:clueless:123>,<a:hachune:456>,🎵';
const result = parseLoadingEmojis();
expect(result).toEqual(['<:clueless:123>', '<a:hachune:456>', '🎵']);
});
it('should return default emojis when LOADING_EMOJIS is empty', () => {
process.env.LOADING_EMOJIS = '';
const result = parseLoadingEmojis();
expect(result).toEqual(['🤔', '✨', '🎵']);
});
it('should return default emojis when LOADING_EMOJIS is whitespace only', () => {
process.env.LOADING_EMOJIS = ' ';
const result = parseLoadingEmojis();
expect(result).toEqual(['🤔', '✨', '🎵']);
});
it('should handle whitespace in emoji list', () => {
process.env.LOADING_EMOJIS = ' 🤔 , ✨ , 🎵 ';
const result = parseLoadingEmojis();
expect(result).toEqual(['🤔', '✨', '🎵']);
});
it('should filter out empty entries', () => {
process.env.LOADING_EMOJIS = '🤔,,✨,,,';
const result = parseLoadingEmojis();
expect(result).toEqual(['🤔', '✨']);
});
});
describe('getRandomLoadingEmoji', () => {
afterEach(() => {
delete process.env.LOADING_EMOJIS;
});
it('should return a valid emoji from the list', () => {
process.env.LOADING_EMOJIS = '🤔,✨,🎵';
const result = getRandomLoadingEmoji();
expect(['🤔', '✨', '🎵']).toContain(result);
});
it('should return default emoji when LOADING_EMOJIS is empty', () => {
process.env.LOADING_EMOJIS = '';
const result = getRandomLoadingEmoji();
expect(['🤔', '✨', '🎵']).toContain(result);
});
it('should return different emojis on multiple calls', () => {
process.env.LOADING_EMOJIS = '🤔,✨,🎵,🎤,🌸';
const results = new Set();
for (let i = 0; i < 20; i++) {
results.add(getRandomLoadingEmoji());
}
// With 5 emojis and 20 calls, we should get at least 2 different ones
expect(results.size).toBeGreaterThanOrEqual(2);
});
});
});
describe('voicemsg command', () => {
let mockInteraction: {
options: { getString: jest.Mock };
reply: jest.Mock;
editReply: jest.Mock;
client: {
provider: jest.Mock;
llmconf: jest.Mock;
sysprompt: jest.Mock;
};
};
let mockProvider: {
name: jest.Mock;
requestLLMResponse: jest.Mock;
requestStructuredVoiceResponse: jest.Mock;
setModel: jest.Mock;
};
const mockConfig = {
max_new_tokens: 100,
min_new_tokens: 1,
temperature: 0.7,
top_p: 0.9,
frequency_penalty: 0.0,
presence_penalty: 0.0,
msg_context: 8,
};
beforeEach(() => {
jest.clearAllMocks();
mockProvider = {
name: jest.fn().mockReturnValue('OpenAI (gpt-4)'),
requestLLMResponse: jest.fn(),
requestStructuredVoiceResponse: jest.fn(),
setModel: jest.fn(),
};
mockInteraction = {
options: { getString: jest.fn() },
reply: jest.fn(),
editReply: jest.fn(),
client: {
provider: jest.fn().mockReturnValue(mockProvider),
llmconf: jest.fn().mockReturnValue(mockConfig),
sysprompt: jest.fn().mockReturnValue('You are Miku'),
},
};
});
it('should have correct command data structure', () => {
expect(voicemsgCommand.data).toBeDefined();
expect(voicemsgCommand.data.setName).toBeDefined();
expect(voicemsgCommand.execute).toBeDefined();
});
it('should have correct command name and description', () => {
// The mock SlashCommandBuilder returns a chainable object
// We verify the structure exists rather than specific values
expect(voicemsgCommand.data).toBeDefined();
expect(voicemsgCommand.data.setName).toBeDefined();
expect(voicemsgCommand.data.setDescription).toBeDefined();
});
it('should have required text option', () => {
// The command data is built when the module loads
// We just verify the export structure is correct
expect(voicemsgCommand.data).toBeDefined();
expect(voicemsgCommand.execute).toBeDefined();
});
it('should generate voice message with structured response', async () => {
mockInteraction.options.getString.mockReturnValue('Hello Miku!');
mockProvider.requestStructuredVoiceResponse.mockResolvedValue({
message: 'Hello there! Nice to meet you~ ♪',
instruct: 'Speak cheerfully and energetically',
});
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await voicemsgCommand.execute(mockInteraction);
// Should show initial loading embed
expect(mockInteraction.reply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
// Should call structured response method
expect(mockProvider.requestStructuredVoiceResponse).toHaveBeenCalledWith(
'Hello Miku!',
'You are Miku',
mockConfig
);
// Should generate TTS with instruct
expect(requestTTSResponse).toHaveBeenCalledWith(
'Hello there! Nice to meet you~ ♪',
undefined,
undefined,
'Speak cheerfully and energetically'
);
// Should update with final embed and audio file (called 3 times: thinking, tts, final)
expect(mockInteraction.editReply).toHaveBeenCalledTimes(3);
// Verify the last call includes files (audio attachment)
const lastEditCall = mockInteraction.editReply.mock.calls[2][0];
// The mock EmbedBuilder methods return the mock function, not the embed
// So we just verify editReply was called with an object containing embeds
expect(lastEditCall.embeds).toBeDefined();
expect(mockInteraction.editReply).toHaveBeenCalledWith(
expect.objectContaining({
embeds: expect.anything(),
})
);
});
it('should handle provider without structured output (fallback)', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
// Remove structured method to test fallback
delete mockProvider.requestStructuredVoiceResponse;
mockProvider.requestLLMResponse.mockResolvedValue(
JSON.stringify({
message: 'Fallback response',
instruct: 'Speak normally',
})
);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await voicemsgCommand.execute(mockInteraction);
expect(mockProvider.requestLLMResponse).toHaveBeenCalled();
expect(requestTTSResponse).toHaveBeenCalledWith(
'Fallback response',
undefined,
undefined,
'Speak normally'
);
});
it('should handle malformed JSON in fallback', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
delete mockProvider.requestStructuredVoiceResponse;
mockProvider.requestLLMResponse.mockResolvedValue('Invalid JSON response');
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await voicemsgCommand.execute(mockInteraction);
// Should use fallback defaults
expect(requestTTSResponse).toHaveBeenCalledWith(
'Invalid JSON response',
undefined,
undefined,
'Speak in a friendly and enthusiastic tone'
);
});
it('should handle JSON with markdown code blocks', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
delete mockProvider.requestStructuredVoiceResponse;
mockProvider.requestLLMResponse.mockResolvedValue(
'```json\n{"message": "Parsed response", "instruct": "Speak softly"}\n```'
);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await voicemsgCommand.execute(mockInteraction);
expect(requestTTSResponse).toHaveBeenCalledWith(
'Parsed response',
undefined,
undefined,
'Speak softly'
);
});
it('should handle missing message field in JSON response', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
delete mockProvider.requestStructuredVoiceResponse;
mockProvider.requestLLMResponse.mockResolvedValue(
JSON.stringify({
instruct: 'Speak happily',
})
);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await voicemsgCommand.execute(mockInteraction);
// Should use the full response as message
expect(requestTTSResponse).toHaveBeenCalledWith(
expect.anything(),
undefined,
undefined,
'Speak happily'
);
});
it('should handle missing instruct field in JSON response', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
delete mockProvider.requestStructuredVoiceResponse;
mockProvider.requestLLMResponse.mockResolvedValue(
JSON.stringify({
message: 'Hello!',
})
);
requestTTSResponse.mockResolvedValue({
arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(100)),
});
await voicemsgCommand.execute(mockInteraction);
expect(requestTTSResponse).toHaveBeenCalledWith(
'Hello!',
undefined,
undefined,
'Speak in a friendly and enthusiastic tone'
);
});
it('should handle errors gracefully', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
mockProvider.requestStructuredVoiceResponse.mockRejectedValue(new Error('LLM API error'));
await voicemsgCommand.execute(mockInteraction);
expect(mockInteraction.reply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
expect(mockInteraction.editReply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
});
it('should handle missing provider configuration', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
mockInteraction.client.provider = jest.fn().mockReturnValue(null);
await voicemsgCommand.execute(mockInteraction);
expect(mockInteraction.reply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
expect(mockInteraction.editReply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
});
it('should handle missing llmconf configuration', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
mockInteraction.client.llmconf = jest.fn().mockReturnValue(null);
await voicemsgCommand.execute(mockInteraction);
expect(mockInteraction.reply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
expect(mockInteraction.editReply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
});
it('should handle missing sysprompt configuration', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
mockInteraction.client.sysprompt = jest.fn().mockReturnValue(null);
await voicemsgCommand.execute(mockInteraction);
expect(mockInteraction.reply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
expect(mockInteraction.editReply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
});
it('should handle TTS generation errors', async () => {
mockInteraction.options.getString.mockReturnValue('Test message');
mockProvider.requestStructuredVoiceResponse.mockResolvedValue({
message: 'Hello!',
instruct: 'Speak happily',
});
requestTTSResponse.mockRejectedValue(new Error('TTS service unavailable'));
await voicemsgCommand.execute(mockInteraction);
expect(mockInteraction.reply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
expect(mockInteraction.editReply).toHaveBeenCalledWith({
embeds: [expect.anything()],
});
});
});

View File

@@ -43,17 +43,29 @@ import {
import 'dotenv/config';
import { LLMConfig } from './commands/types';
import { LLMProvider, StreamingChunk } from './provider/provider';
import {
createStatusEmbed,
getRandomLoadingEmoji,
getRandomKawaiiPhrase,
KAWAII_PHRASES,
fetchMotd,
dateToSnowflake,
sendBiggestLoserAnnouncement,
triggerThrowback,
} from './commands/helpers';
interface State {
llmconf?(): LLMConfig;
provider?(): LLMProvider;
sysprompt?(): string;
config?(): LLMConfig;
}
const state: State = {};
/**
* Parse loading emojis from environment variable
* Format: "<:clueless:123>,<a:hachune:456>,..."
* Re-exported from helpers for backwards compatibility
*/
function parseLoadingEmojis(): string[] {
const emojiStr = process.env.LOADING_EMOJIS || '';
@@ -68,58 +80,36 @@ function parseLoadingEmojis(): string[] {
}
/**
* Pick a random loading emoji from the configured list
* Parse reaction guild IDs from environment variable
* Format: "123456789,987654321,..."
*/
function getRandomLoadingEmoji(): string {
const emojis = parseLoadingEmojis();
return emojis[Math.floor(Math.random() * emojis.length)];
}
/**
* Create an embed for status updates during LLM generation
*/
function createStatusEmbed(emoji: string, phrase: string, status: string): EmbedBuilder {
// Miku teal color
return new EmbedBuilder()
.setColor(0x39c5bb)
.setAuthor({ name: phrase })
.setDescription(`${emoji}\n${status}`)
.setTimestamp();
}
/**
* Format the loading message with emoji and reasoning content
*/
function formatLoadingMessage(emoji: string, reasoning: string): string {
const kawaiiPhrases = [
'Hmm... let me think~ ♪',
'Processing nyaa~',
'Miku is thinking...',
'Calculating with magic ✨',
'Pondering desu~',
'Umm... one moment! ♪',
'Brain go brrr~',
'Assembling thoughts... ♪',
'Loading Miku-brain...',
'Thinking hard senpai~',
];
const phrase = kawaiiPhrases[Math.floor(Math.random() * kawaiiPhrases.length)];
let content = `${emoji} ${phrase}`;
if (reasoning && reasoning.trim().length > 0) {
// Truncate reasoning if too long for display
const displayReasoning =
reasoning.length > 500 ? reasoning.slice(0, 500) + '...' : reasoning;
content += `\n\n> ${displayReasoning}`;
function parseReactionGuilds(): Set<string> {
const guildsStr = process.env.REACTION_GUILDS || process.env.GUILD || '';
if (!guildsStr.trim()) {
logWarn('[bot] No REACTION_GUILDS or GUILD configured, reactions will not be counted.');
return new Set();
}
return content;
const guilds = new Set<string>();
guildsStr.split(',').forEach((id) => {
const trimmed = id.trim();
if (trimmed) {
guilds.add(trimmed);
}
});
logInfo(`[bot] Configured reaction guilds: ${[...guilds].join(', ')}`);
return guilds;
}
const reactionGuilds = parseReactionGuilds();
interface CommandClient extends Client {
commands?: Collection<
string,
{ data: SlashCommandBuilder; execute: (interaction: Interaction) => Promise<void> }
>;
llmconf?: () => LLMConfig;
provider?: () => LLMProvider;
sysprompt?: () => string;
}
const client: CommandClient = new Client({
@@ -171,6 +161,11 @@ async function onMessageReactionChanged(
}
}
// Only count reactions from the configured guilds
if (!reactionGuilds.has(reaction.message.guildId)) {
return;
}
// Now the message has been cached and is fully available
logInfo(
`[bot] ${reaction.message.author?.id}'s message reaction count changed: ${reaction.emoji.name}x${reaction.count}`
@@ -246,23 +241,9 @@ async function onNewMessage(message: Message) {
const cleanHistoryList = [...historyMessages, message];
try {
// Pick a random loading emoji for this generation
// Pick a random loading emoji and phrase for this generation
const loadingEmoji = getRandomLoadingEmoji();
// Send initial loading message with embed
const kawaiiPhrases = [
'Hmm... let me think~ ♪',
'Processing nyaa~',
'Miku is thinking...',
'Calculating with magic ✨',
'Pondering desu~',
'Umm... one moment! ♪',
'Brain go brrr~',
'Assembling thoughts... ♪',
'Loading Miku-brain...',
'Thinking hard senpai~',
];
const loadingPhrase = kawaiiPhrases[Math.floor(Math.random() * kawaiiPhrases.length)];
const loadingPhrase = getRandomKawaiiPhrase();
const loadingEmbed = createStatusEmbed(loadingEmoji, loadingPhrase, 'Starting...');
const loadingMsg = await message.reply({ embeds: [loadingEmbed] });
@@ -372,19 +353,6 @@ async function onNewMessage(message: Message) {
}
}
async function fetchMotd() {
try {
const res = await fetch(process.env.MOTD_HREF);
const xml = await res.text();
const parser = new JSDOM(xml);
const doc = parser.window.document;
const el = doc.querySelector(process.env.MOTD_QUERY);
return el ? el.textContent : null;
} catch (err) {
logWarn('[bot] Failed to fetch MOTD; is the booru down?');
}
}
async function requestRVCResponse(src: Attachment): Promise<NodeFetchBlob> {
logInfo(`[bot] Downloading audio message ${src.url}`);
const srcres = await fetch(src.url);
@@ -450,17 +418,6 @@ async function scheduleRandomMessage(firstTime = false) {
setTimeout(scheduleRandomMessage, timeoutMins * 60 * 1000);
}
/**
* Convert a Date to a Discord snowflake ID (approximate)
* Discord epoch: 2015-01-01T00:00:00.000Z
*/
function dateToSnowflake(date: Date): string {
const DISCORD_EPOCH = 1420070400000n;
const timestamp = BigInt(date.getTime());
const snowflake = (timestamp - DISCORD_EPOCH) << 22n;
return snowflake.toString();
}
async function scheduleThrowback(firstTime = false) {
if (!firstTime) {
if (!process.env.THROWBACK_CHANNEL) {
@@ -477,56 +434,14 @@ async function scheduleThrowback(firstTime = false) {
}
try {
// Calculate date from 1 year ago
const oneYearAgo = new Date();
oneYearAgo.setFullYear(oneYearAgo.getFullYear() - 1);
// Convert to approximate snowflake ID
const aroundSnowflake = dateToSnowflake(oneYearAgo);
logInfo(
`[bot] Fetching messages around ${oneYearAgo.toISOString()} (snowflake: ${aroundSnowflake})`
await triggerThrowback(
client,
channel,
channel,
state.provider!(),
state.sysprompt!(),
state.llmconf!()
);
// Fetch messages around that time
const messages = await channel.messages.fetch({
around: aroundSnowflake,
limit: 50,
});
// Filter to only text messages from non-bots
const textMessages = messages.filter(
(m) =>
!m.author.bot &&
m.cleanContent.length > 0 &&
(m.type === MessageType.Default || m.type === MessageType.Reply)
);
if (textMessages.size === 0) {
logWarn('[bot] No messages found from 1 year ago, skipping throwback.');
} else {
// Pick a random message
const messagesArray = [...textMessages.values()];
const randomMsg = messagesArray[Math.floor(Math.random() * messagesArray.length)];
logInfo(
`[bot] Selected throwback message from ${randomMsg.author.username}: "${randomMsg.cleanContent}"`
);
// Generate LLM response using the standard system prompt
if ('sendTyping' in channel) {
await channel.sendTyping();
}
const llmResponse = await state.provider!().requestLLMResponse(
[randomMsg],
state.sysprompt!(),
state.llmconf!()
);
// Reply directly to the original message
await randomMsg.reply(llmResponse);
logInfo(`[bot] Sent throwback reply: ${llmResponse}`);
}
} catch (err) {
logError(`[bot] Error fetching throwback message: ${err}`);
}
@@ -550,174 +465,13 @@ async function scheduleBiggestLoser(firstTime = false) {
const channel = <TextChannel>await client.channels.fetch(process.env.LOSER_CHANNEL);
if (channel) {
try {
const yesterdayStart = new Date();
yesterdayStart.setDate(yesterdayStart.getDate() - 1);
yesterdayStart.setHours(0, 0, 0, 0);
const declaration = await sendBiggestLoserAnnouncement(client, channel);
const yesterdayEnd = new Date();
yesterdayEnd.setHours(0, 0, 0, 0);
const startId = dateToSnowflake(yesterdayStart);
const endId = dateToSnowflake(yesterdayEnd);
const realNameToCount = new Map<string, number>();
for (const realName of new Set(Object.values(REAL_NAMES))) {
if (LOSER_WHITELIST.includes(realName as string)) {
realNameToCount.set(realName as string, 0);
}
}
const guild = await client.guilds.fetch(process.env.GUILD as string);
if (guild) {
const channels = await guild.channels.fetch();
const textChannels = channels.filter((c: any) => c && c.isTextBased());
for (const [_, textChannel] of textChannels) {
let lastId = startId;
while (true) {
try {
const messages = await (textChannel as any).messages.fetch({
after: lastId,
limit: 100,
});
if (messages.size === 0) break;
let maxId = lastId;
for (const [msgId, msg] of messages) {
if (BigInt(msgId) > BigInt(maxId)) maxId = msgId;
if (BigInt(msgId) >= BigInt(endId)) continue;
if (
!msg.author.bot &&
(REAL_NAMES as any)[msg.author.username]
) {
const realName = (REAL_NAMES as any)[msg.author.username];
if (realNameToCount.has(realName)) {
realNameToCount.set(
realName,
realNameToCount.get(realName)! + 1
);
}
}
}
lastId = maxId;
if (BigInt(lastId) >= BigInt(endId) || messages.size < 100) break;
} catch (e) {
logWarn(`[bot] Error fetching from channel: ${e}`);
break;
}
}
}
}
let minCount = Infinity;
let biggestLosers: string[] = [];
for (const [realName, count] of realNameToCount.entries()) {
if (count < minCount) {
minCount = count;
biggestLosers = [realName];
} else if (count === minCount) {
biggestLosers.push(realName);
}
}
if (biggestLosers.length > 0) {
biggestLosers.sort();
// Track individual streaks per person
const streakFile = path.join(__dirname, 'biggest_loser_streaks.json');
let streaks: Record<string, number> = {};
if (fs.existsSync(streakFile)) {
try {
streaks = JSON.parse(fs.readFileSync(streakFile, 'utf8'));
} catch (e) {
logWarn(`[bot] Failed to read streak data: ${e}`);
streaks = {};
}
}
// Update streaks: continue if this person was in yesterday's losers, otherwise reset to 1
const newStreaks: Record<string, number> = {};
for (const name of biggestLosers) {
newStreaks[name] = (streaks[name] || 0) + 1;
}
fs.writeFileSync(streakFile, JSON.stringify(newStreaks));
const firstNames = biggestLosers.map((n) => n.split(' ')[0]);
let joinedNames = firstNames[0];
if (firstNames.length === 2) {
joinedNames = `${firstNames[0]} and ${firstNames[1]}`;
} else if (firstNames.length > 2) {
joinedNames = `${firstNames.slice(0, -1).join(', ')}, and ${firstNames[firstNames.length - 1]}`;
}
// Build message with individual streak info
const isPlural = biggestLosers.length > 1;
const loserWord = isPlural ? 'losers' : 'loser';
const isAre = isPlural ? 'are' : 'is';
let declaration: string;
if (isPlural) {
// For multiple losers, list each with their streak
const streakParts = biggestLosers.map((name, idx) => {
const firstName = firstNames[idx];
const dayWord = newStreaks[name] === 1 ? 'day' : 'days';
return `${firstName} (${newStreaks[name]} ${dayWord} in a row)`;
});
let streakDetails = streakParts[0];
if (streakParts.length === 2) {
streakDetails = `${streakParts[0]} and ${streakParts[1]}`;
} else if (streakParts.length > 2) {
streakDetails = `${streakParts.slice(0, -1).join(', ')}, and ${streakParts[streakParts.length - 1]}`;
}
declaration = `Yesterday's biggest ${loserWord} ${isAre} ${joinedNames} with only ${minCount} messages! Streaks: ${streakDetails}.`;
} else {
const dayWord = newStreaks[biggestLosers[0]] === 1 ? 'day' : 'days';
declaration = `Yesterday's biggest ${loserWord} ${isAre} ${joinedNames} with only ${minCount} messages! They have been the biggest ${loserWord} for ${newStreaks[biggestLosers[0]]} ${dayWord} in a row.`;
}
try {
let pingTags: string[] = [];
if (guild) {
// Build a reverse map from real name to Discord user IDs
const realNameToUserIds = new Map<string, string[]>();
for (const [username, realName] of Object.entries(REAL_NAMES)) {
if (!realNameToUserIds.has(realName)) {
realNameToUserIds.set(realName, []);
}
realNameToUserIds.get(realName)!.push(username);
}
// Fetch members for the usernames we need to ping
const usernamesToCheck = new Set<string>();
for (const realName of biggestLosers) {
const usernames = realNameToUserIds.get(realName);
if (usernames) {
usernames.forEach((u) => usernamesToCheck.add(u));
}
}
// Try to fetch members (with a shorter timeout to avoid hanging)
const members = await guild.members.fetch({ time: 10000 });
for (const [_, member] of members) {
const username = member.user.username;
if (usernamesToCheck.has(username)) {
const tag = `<@${member.user.id}>`;
if (!pingTags.includes(tag)) {
pingTags.push(tag);
}
}
}
}
if (pingTags.length > 0) {
declaration += `\n${pingTags.join(' ')}`;
}
} catch (e) {
logWarn(`[bot] Error fetching members for ping: ${e}`);
}
logInfo(`[bot] Declaring biggest loser: ${declaration}`);
await channel.send(declaration);
await channel.send(
'https://tenor.com/view/klajumas-spit-skreplis-klajumas-skreplis-gif-13538828554330887910'
);
}
logInfo(`[bot] Declaring biggest loser: ${declaration}`);
await channel.send(declaration);
await channel.send(
'https://tenor.com/view/klajumas-spit-skreplis-klajumas-skreplis-gif-13538828554330887910'
);
} catch (err) {
logError(`[bot] Error finding biggest loser: ${err}`);
}
@@ -800,6 +554,11 @@ client.on(Events.InteractionCreate, async (interaction) => {
}
}
// Attach shared state to client for commands to access
client.llmconf = () => state.llmconf?.() ?? state.config?.();
client.provider = () => state.provider?.();
client.sysprompt = () => state.sysprompt?.();
logInfo('[bot] Logging in...');
await client.login(process.env.TOKEN);
if (process.env.ENABLE_MOTD) {

View File

@@ -0,0 +1,193 @@
import { ChatInputCommandInteraction, SlashCommandBuilder, MessageType } from 'discord.js';
import { logInfo, logWarn, logError } from '../../../logging';
import {
fetchMotd,
dateToSnowflake,
sendBiggestLoserAnnouncement,
triggerThrowback,
} from '../helpers';
/**
* debug.ts
* Debug commands for ADMIN to force-trigger scheduled events
*/
/**
 * /debug — admin-only escape hatch that force-triggers scheduled events
 * (MOTD, throwback, biggest-loser announcement) so they can be tested on
 * demand. All results and errors are reported via ephemeral replies; the
 * actual output is posted to the channel the command was invoked in.
 */
async function debugCommand(interaction: ChatInputCommandInteraction) {
    // Only ADMIN can use debug commands
    if (interaction.user.id !== process.env.ADMIN) {
        await interaction.reply({
            content: '❌ You are not authorized to use debug commands.',
            ephemeral: true,
        });
        return;
    }
    const subcommand = interaction.options.getString('action');
    if (!subcommand) {
        await interaction.reply({
            content: '❌ No action specified.',
            ephemeral: true,
        });
        return;
    }
    // All three actions post into the invoking channel; interactions can
    // arrive without a usable channel (interaction.channel is nullable), so
    // fail fast here instead of crashing on a null channel mid-action.
    const targetChannel: any = interaction.channel;
    if (!targetChannel || typeof targetChannel.send !== 'function') {
        await interaction.reply({
            content: '❌ This command must be used in a text channel.',
            ephemeral: true,
        });
        return;
    }
    await interaction.deferReply({ ephemeral: true });
    try {
        switch (subcommand) {
            case 'motd': {
                logInfo('[debug] ADMIN triggered MOTD');
                const randomMessage = await fetchMotd();
                if (randomMessage) {
                    // Send to the channel where the command was invoked
                    await targetChannel.send(randomMessage);
                    logInfo(`[debug] Sent forced MOTD: ${randomMessage}`);
                    await interaction.editReply({
                        content: `✅ MOTD sent successfully!\n\n**Message:** ${randomMessage}`,
                    });
                } else {
                    await interaction.editReply({
                        content: '❌ Could not fetch MOTD.',
                    });
                }
                break;
            }
            case 'throwback': {
                logInfo('[debug] ADMIN triggered throwback');
                if (!process.env.THROWBACK_CHANNEL) {
                    await interaction.editReply({
                        content: '❌ THROWBACK_CHANNEL not configured.',
                    });
                    return;
                }
                // Get provider/config from client (attached at startup).
                // Optional calls so missing accessors hit the friendly error
                // below instead of throwing a TypeError.
                const provider = (interaction.client as any).provider?.();
                const llmconf = (interaction.client as any).llmconf?.();
                const sysprompt = (interaction.client as any).sysprompt?.();
                if (!provider || !llmconf || !sysprompt) {
                    await interaction.editReply({
                        content: '❌ LLM provider/configuration not available.',
                    });
                    return;
                }
                // Determine source channel (optional parameter or default)
                const sourceId = interaction.options.getString('source');
                let sourceChannel: any;
                if (sourceId) {
                    sourceChannel = await interaction.client.channels.fetch(sourceId);
                    if (!sourceChannel || !('messages' in sourceChannel)) {
                        await interaction.editReply({
                            content: '❌ Source channel not found or invalid.',
                        });
                        return;
                    }
                } else {
                    sourceChannel = await interaction.client.channels.fetch(
                        process.env.THROWBACK_CHANNEL
                    );
                }
                try {
                    // Reply goes to the invoking channel, not necessarily
                    // the channel the history was pulled from.
                    const result = await triggerThrowback(
                        interaction.client,
                        sourceChannel,
                        targetChannel,
                        provider,
                        sysprompt,
                        llmconf
                    );
                    await interaction.editReply({
                        content: `✅ Throwback sent successfully!\n\n**Original message:** ${result.originalMessage}\n\n**Reply:** ${result.response}`,
                    });
                } catch (err) {
                    logError(`[debug] Error fetching throwback message: ${err}`);
                    await interaction.editReply({
                        content: `❌ Error: ${err}`,
                    });
                }
                break;
            }
            case 'biggest-loser': {
                logInfo('[debug] ADMIN triggered biggest loser announcement');
                if (!process.env.LOSER_CHANNEL) {
                    await interaction.editReply({
                        content: '❌ LOSER_CHANNEL not configured.',
                    });
                    return;
                }
                // Determine source guild (optional parameter or default)
                const sourceId = interaction.options.getString('source');
                try {
                    // Helper only computes the declaration string; posting
                    // it (plus the traditional gif) happens here.
                    const declaration = await sendBiggestLoserAnnouncement(
                        interaction.client,
                        targetChannel,
                        sourceId || undefined
                    );
                    logInfo(`[debug] Declaring biggest loser: ${declaration}`);
                    await targetChannel.send(declaration);
                    await targetChannel.send(
                        'https://tenor.com/view/klajumas-spit-skreplis-klajumas-skreplis-gif-13538828554330887910'
                    );
                    await interaction.editReply({
                        content: `✅ Biggest loser announcement sent!\n\n**Declaration:** ${declaration}`,
                    });
                } catch (err) {
                    logError(`[debug] Error finding biggest loser: ${err}`);
                    await interaction.editReply({
                        content: `❌ Error: ${err}`,
                    });
                }
                break;
            }
            default: {
                await interaction.editReply({
                    content: `❌ Unknown action: ${subcommand}`,
                });
            }
        }
    } catch (err) {
        logError(`[debug] Error executing debug command: ${err}`);
        await interaction.editReply({
            content: `❌ Error: ${err}`,
        });
    }
}
// CommonJS export consumed by the command loader: `data` registers the
// slash command with Discord, `execute` handles invocations.
export = {
    // /debug action:<motd|throwback|biggest-loser> [source:<channel/guild id>]
    data: new SlashCommandBuilder()
        .setName('debug')
        .setDescription('Debug commands for admin')
        .addStringOption((option) =>
            option
                .setName('action')
                .setDescription('The scheduled event to trigger')
                .setRequired(true)
                .addChoices(
                    { name: 'MOTD (Message of the Day)', value: 'motd' },
                    { name: 'Throwback (1 year ago message)', value: 'throwback' },
                    { name: 'Biggest Loser Announcement', value: 'biggest-loser' }
                )
        )
        // Optional override for where history is pulled from: a channel ID
        // for throwback, a guild ID for biggest-loser.
        .addStringOption((option) =>
            option
                .setName('source')
                .setDescription('Source channel/guild ID to pull history from (optional)')
        ),
    execute: debugCommand,
};

376
discord/commands/helpers.ts Normal file
View File

@@ -0,0 +1,376 @@
/**
* helpers.ts
* Shared helper functions for Discord commands
*/
import {
EmbedBuilder,
MessageType,
Client,
Guild,
GuildTextBasedChannel,
Collection,
} from 'discord.js';
import { logInfo, logWarn, logError } from '../../logging';
import { REAL_NAMES, LOSER_WHITELIST } from '../util';
import path = require('node:path');
import fs = require('node:fs');
/**
 * Kawaii loading phrases used in status embeds
 */
export const KAWAII_PHRASES = [
    'Hmm... let me think~ ♪',
    'Processing nyaa~',
    'Miku is thinking...',
    'Calculating with magic ✨',
    'Pondering desu~',
    'Umm... one moment! ♪',
    'Brain go brrr~',
    'Assembling thoughts... ♪',
    'Loading Miku-brain...',
    'Thinking hard senpai~',
];
/**
 * Miku's theme color (teal)
 */
export const MIKU_COLOR = 0x39c5bb;
/** Uniformly pick one element of a non-empty list. */
function pickRandom<T>(items: readonly T[]): T {
    return items[Math.floor(Math.random() * items.length)];
}
/**
 * Parse loading emojis from the LOADING_EMOJIS environment variable.
 * Format: "<:clueless:123>,<a:hachune:456>,..."
 */
export function parseLoadingEmojis(): string[] {
    const raw = process.env.LOADING_EMOJIS || '';
    if (raw.trim() === '') {
        // Default fallback emojis if not configured
        return ['🤔', '✨', '🎵'];
    }
    const emojis: string[] = [];
    for (const entry of raw.split(',')) {
        const trimmed = entry.trim();
        if (trimmed.length > 0) {
            emojis.push(trimmed);
        }
    }
    return emojis;
}
/**
 * Pick a random loading emoji from the configured list
 */
export function getRandomLoadingEmoji(): string {
    return pickRandom(parseLoadingEmojis());
}
/**
 * Pick a random kawaii phrase
 */
export function getRandomKawaiiPhrase(): string {
    return pickRandom(KAWAII_PHRASES);
}
/**
 * Create an embed for status updates during generation.
 *
 * @param emoji - Loading emoji shown at the top of the description
 * @param phrase - Kawaii phrase shown as the embed author line
 * @param status - Current progress text
 */
export function createStatusEmbed(emoji: string, phrase: string, status: string): EmbedBuilder {
    const embed = new EmbedBuilder();
    embed.setColor(MIKU_COLOR);
    embed.setAuthor({ name: phrase });
    embed.setDescription(`${emoji}\n${status}`);
    embed.setTimestamp();
    return embed;
}
/**
 * Create a status embed with a randomly chosen emoji and phrase.
 */
export function createSimpleStatusEmbed(status: string): EmbedBuilder {
    return createStatusEmbed(getRandomLoadingEmoji(), getRandomKawaiiPhrase(), status);
}
/**
 * Convert a Date to a Discord snowflake ID (approximate).
 *
 * Snowflakes carry the millisecond offset from the Discord epoch
 * (2015-01-01T00:00:00.000Z) in their top 42 bits; the worker/process/
 * increment fields are left zero, which is fine for range queries.
 */
export function dateToSnowflake(date: Date): string {
    const discordEpochMs = 1420070400000n;
    const offsetMs = BigInt(date.getTime()) - discordEpochMs;
    return (offsetMs << 22n).toString();
}
/**
 * Fetch the message-of-the-day from the configured source.
 *
 * Downloads the document at MOTD_HREF and returns the text content of the
 * first element matching the MOTD_QUERY CSS selector.
 *
 * @returns The MOTD text, or null when unconfigured, on fetch/parse failure,
 *          or when the selector matches nothing.
 */
export async function fetchMotd(): Promise<string | null> {
    // Guard the env vars up front: the previous `process.env.MOTD_HREF!`
    // assertions meant an unset variable produced a confusing fetch failure
    // blamed on the booru instead of a clear configuration warning.
    const href = process.env.MOTD_HREF;
    const query = process.env.MOTD_QUERY;
    if (!href || !query) {
        logWarn('[helpers] MOTD_HREF or MOTD_QUERY not configured; skipping MOTD fetch.');
        return null;
    }
    try {
        // Lazy-load the heavy dependencies so importing helpers stays cheap.
        const { JSDOM } = await import('jsdom');
        const fetch = (await import('node-fetch')).default;
        const res = await fetch(href);
        const xml = await res.text();
        const doc = new JSDOM(xml).window.document;
        const el = doc.querySelector(query);
        return el ? el.textContent : null;
    } catch (err) {
        logWarn(`[helpers] Failed to fetch MOTD; is the booru down? (${err})`);
        return null;
    }
}
/**
 * Compute yesterday's "biggest loser" declaration string.
 *
 * Counts yesterday's messages per whitelisted person across the configured
 * guilds, picks whoever has the fewest, updates the per-person streak file,
 * and returns a human-readable declaration with ping tags appended when
 * guild members can be resolved.
 *
 * NOTE: despite the name, this function does NOT send anything; the caller
 * is responsible for posting the returned declaration.
 *
 * @param client - Discord client
 * @param targetChannel - Unused; kept for call-site compatibility
 * @param sourceGuildId - Optional guild ID to count messages in (defaults to all configured guilds)
 * @returns The declaration string
 * @throws When no eligible losers are found
 */
export async function sendBiggestLoserAnnouncement(
    client: Client,
    targetChannel: any,
    sourceGuildId?: string
): Promise<string> {
    // Count window: yesterday 00:00 (inclusive) to today 00:00 (exclusive),
    // in server-local time, expressed as snowflake IDs for range fetches.
    const yesterdayStart = new Date();
    yesterdayStart.setDate(yesterdayStart.getDate() - 1);
    yesterdayStart.setHours(0, 0, 0, 0);
    const yesterdayEnd = new Date();
    yesterdayEnd.setHours(0, 0, 0, 0);
    const startId = dateToSnowflake(yesterdayStart);
    const endId = dateToSnowflake(yesterdayEnd);
    // Zero-initialize counts for every whitelisted person so someone who sent
    // nothing at all is still eligible to "win".
    const realNameToCount = new Map<string, number>();
    for (const realName of new Set(Object.values(REAL_NAMES))) {
        if (LOSER_WHITELIST.includes(realName as string)) {
            realNameToCount.set(realName as string, 0);
        }
    }
    // Parse REACTION_GUILDS or fall back to GUILD
    const guildsStr = process.env.REACTION_GUILDS || process.env.GUILD || '';
    let guildIds = guildsStr
        .split(',')
        .map((id) => id.trim())
        .filter((id) => id);
    // Override with source guild if specified
    if (sourceGuildId) {
        guildIds = [sourceGuildId];
    }
    const fetchedGuilds: Guild[] = [];
    for (const guildId of guildIds) {
        // guilds.fetch throws on unknown IDs (it never returns a falsy
        // value), so catch and skip instead of letting one bad ID abort the
        // whole announcement.
        let guild: Guild;
        try {
            guild = await client.guilds.fetch(guildId);
        } catch (e) {
            logWarn(`[helpers] Guild ${guildId} not found, skipping.`);
            continue;
        }
        fetchedGuilds.push(guild);
        const channels = await guild.channels.fetch();
        const textChannels = channels.filter((c: any) => c && c.isTextBased());
        for (const [_, textChannel] of textChannels) {
            // Page forward through yesterday's messages, 100 at a time.
            let lastId = startId;
            while (true) {
                try {
                    const messages = await (textChannel as any).messages.fetch({
                        after: lastId,
                        limit: 100,
                    });
                    if (messages.size === 0) break;
                    let maxId = lastId;
                    for (const [msgId, msg] of messages) {
                        if (BigInt(msgId) > BigInt(maxId)) maxId = msgId;
                        // Skip anything from today onward.
                        if (BigInt(msgId) >= BigInt(endId)) continue;
                        const realName = (REAL_NAMES as Record<string, string>)[
                            msg.author.username
                        ];
                        if (!msg.author.bot && realName) {
                            if (realNameToCount.has(realName)) {
                                realNameToCount.set(realName, realNameToCount.get(realName)! + 1);
                            }
                        }
                    }
                    lastId = maxId;
                    if (BigInt(lastId) >= BigInt(endId) || messages.size < 100) break;
                } catch (e) {
                    logWarn(`[helpers] Error fetching from channel: ${e}`);
                    break;
                }
            }
        }
    }
    // Whoever has the minimum count wins; ties produce multiple losers.
    let minCount = Infinity;
    let biggestLosers: string[] = [];
    for (const [realName, count] of realNameToCount.entries()) {
        if (count < minCount) {
            minCount = count;
            biggestLosers = [realName];
        } else if (count === minCount) {
            biggestLosers.push(realName);
        }
    }
    if (biggestLosers.length === 0 || minCount === Infinity) {
        throw new Error('No eligible losers found for yesterday.');
    }
    biggestLosers.sort();
    // Per-person streak bookkeeping, persisted next to the compiled module.
    const streakFile = path.join(__dirname, 'biggest_loser_streaks.json');
    let streaks: Record<string, number> = {};
    if (fs.existsSync(streakFile)) {
        try {
            streaks = JSON.parse(fs.readFileSync(streakFile, 'utf8'));
        } catch (e) {
            logWarn(`[helpers] Failed to read streak data: ${e}`);
            streaks = {};
        }
    }
    // Repeat losers extend their streak; everyone else starts over at 1.
    const newStreaks: Record<string, number> = {};
    for (const name of biggestLosers) {
        newStreaks[name] = (streaks[name] || 0) + 1;
    }
    fs.writeFileSync(streakFile, JSON.stringify(newStreaks));
    const firstNames = biggestLosers.map((n) => n.split(' ')[0]);
    let joinedNames = firstNames[0];
    if (firstNames.length === 2) {
        joinedNames = `${firstNames[0]} and ${firstNames[1]}`;
    } else if (firstNames.length > 2) {
        joinedNames = `${firstNames.slice(0, -1).join(', ')}, and ${firstNames[firstNames.length - 1]}`;
    }
    const isPlural = biggestLosers.length > 1;
    // Pluralize the (optionally LOSER_WORD-overridden) noun. The refactor
    // from bot.ts dropped the `isPlural ? 'losers' : 'loser'` logic, which
    // made multi-winner days read "the biggest loser are X and Y".
    const baseLoserWord = process.env.LOSER_WORD || 'loser';
    const loserWord = isPlural ? `${baseLoserWord}s` : baseLoserWord;
    const isAre = isPlural ? 'are' : 'is';
    let declaration: string;
    if (isPlural) {
        const streakParts = biggestLosers.map((name, idx) => {
            const firstName = firstNames[idx];
            const dayWord = newStreaks[name] === 1 ? 'day' : 'days';
            return `${firstName} (${newStreaks[name]} ${dayWord} in a row)`;
        });
        let streakDetails = streakParts[0];
        if (streakParts.length === 2) {
            streakDetails = `${streakParts[0]} and ${streakParts[1]}`;
        } else if (streakParts.length > 2) {
            streakDetails = `${streakParts.slice(0, -1).join(', ')}, and ${streakParts[streakParts.length - 1]}`;
        }
        declaration = `Yesterday's biggest ${loserWord} ${isAre} ${joinedNames} with only ${minCount} messages! Streaks: ${streakDetails}.`;
    } else {
        const dayWord = newStreaks[biggestLosers[0]] === 1 ? 'day' : 'days';
        declaration = `Yesterday's biggest ${loserWord} ${isAre} ${joinedNames} with only ${minCount} messages! They have been the biggest ${loserWord} for ${newStreaks[biggestLosers[0]]} ${dayWord} in a row.`;
    }
    // Best-effort: resolve the losers' Discord accounts so they can be
    // pinged. Failures here never block the announcement itself.
    try {
        let pingTags: string[] = [];
        if (fetchedGuilds.length > 0) {
            // Reverse map: real name -> all Discord usernames that map to it.
            const realNameToUserIds = new Map<string, string[]>();
            for (const [username, realName] of Object.entries(REAL_NAMES)) {
                const name = realName as string;
                if (!realNameToUserIds.has(name)) {
                    realNameToUserIds.set(name, []);
                }
                realNameToUserIds.get(name)!.push(username);
            }
            const usernamesToCheck = new Set<string>();
            for (const realName of biggestLosers) {
                const usernames = realNameToUserIds.get(realName);
                if (usernames) {
                    usernames.forEach((u) => usernamesToCheck.add(u));
                }
            }
            for (const guild of fetchedGuilds) {
                try {
                    // Short timeout so a slow gateway can't hang the job.
                    const members = await guild.members.fetch({ time: 10000 });
                    for (const [_, member] of members) {
                        const username = member.user.username;
                        if (usernamesToCheck.has(username)) {
                            const tag = `<@${member.user.id}>`;
                            if (!pingTags.includes(tag)) {
                                pingTags.push(tag);
                            }
                        }
                    }
                } catch (e) {
                    logWarn(`[helpers] Error fetching members from guild ${guild.id}: ${e}`);
                }
            }
        }
        if (pingTags.length > 0) {
            declaration += `\n${pingTags.join(' ')}`;
        }
    } catch (e) {
        logWarn(`[helpers] Error fetching members for ping: ${e}`);
    }
    return declaration;
}
// Result of a completed throwback, used by /debug to report what was posted.
export interface ThrowbackResult {
    // Clean text content of the historical message that was picked
    originalMessage: string;
    // Username of the historical message's author
    author: string;
    // The LLM-generated reply that was sent
    response: string;
}
/**
 * Trigger a throwback: pick a random non-bot text message from roughly one
 * year ago in sourceChannel and post an LLM-generated reply.
 *
 * When source and target are the same channel, the bot replies directly to
 * the original message (matching the pre-refactor scheduled behavior, which
 * used `randomMsg.reply`); otherwise the response is sent to targetChannel
 * as a plain message, since cross-channel replies aren't possible.
 *
 * @param client - Discord client (currently unused; kept for call-site compatibility)
 * @param sourceChannel - Channel to fetch historical messages from
 * @param targetChannel - Channel to send the throwback reply to
 * @param provider - LLM provider used to generate the reply
 * @param sysprompt - System prompt for the LLM
 * @param llmconf - LLM sampling configuration
 * @throws When no suitable message from 1 year ago is found
 */
export async function triggerThrowback(
    client: Client,
    sourceChannel: any,
    targetChannel: any,
    provider: any,
    sysprompt: string,
    llmconf: any
): Promise<ThrowbackResult> {
    // Approximate snowflake for "this moment, one year ago".
    const oneYearAgo = new Date();
    oneYearAgo.setFullYear(oneYearAgo.getFullYear() - 1);
    const aroundSnowflake = dateToSnowflake(oneYearAgo);
    // Fetch a window of messages around that time from the source channel.
    const messages = await sourceChannel.messages.fetch({
        around: aroundSnowflake,
        limit: 50,
    });
    // Keep only regular text messages written by humans.
    const textMessages = messages.filter(
        (m: any) =>
            !m.author.bot &&
            m.cleanContent.length > 0 &&
            (m.type === MessageType.Default || m.type === MessageType.Reply)
    );
    if (textMessages.size === 0) {
        throw new Error('No messages found from 1 year ago.');
    }
    // Pick a random message
    const messagesArray = [...textMessages.values()];
    const randomMsg = messagesArray[Math.floor(Math.random() * messagesArray.length)];
    logInfo(
        `[helpers] Selected throwback message from ${randomMsg.author.username}: "${randomMsg.cleanContent}"`
    );
    // Show a typing indicator while the LLM works, when supported. This was
    // present in the pre-refactor scheduler and is best-effort only.
    if (targetChannel && typeof targetChannel.sendTyping === 'function') {
        try {
            await targetChannel.sendTyping();
        } catch {
            // Never fail a throwback over a typing indicator.
        }
    }
    // Generate LLM response using the standard system prompt
    const llmResponse = await provider.requestLLMResponse([randomMsg], sysprompt, llmconf);
    // Reply to the original message when posting into the same channel;
    // otherwise send the response to the target channel.
    if (sourceChannel === targetChannel && typeof randomMsg.reply === 'function') {
        await randomMsg.reply(llmResponse);
    } else {
        await targetChannel.send(llmResponse);
    }
    logInfo(`[helpers] Sent throwback reply: ${llmResponse}`);
    return {
        originalMessage: randomMsg.cleanContent,
        author: randomMsg.author.username,
        response: llmResponse,
    };
}

View File

@@ -1,13 +1,23 @@
import { AttachmentBuilder, ChatInputCommandInteraction, SlashCommandBuilder } from 'discord.js';
import {
AttachmentBuilder,
ChatInputCommandInteraction,
EmbedBuilder,
SlashCommandBuilder,
} from 'discord.js';
import 'dotenv/config';
import { logError } from '../../../logging';
import { requestTTSResponse } from '../../util';
import {
createStatusEmbed,
getRandomLoadingEmoji,
getRandomKawaiiPhrase,
MIKU_COLOR,
} from '../helpers';
const config = {
ttsSettings: {
speaker: process.env.TTS_SPEAKER || 'Vivian',
pitch_change_oct: 1,
pitch_change_sem: parseInt(process.env.TTS_PITCH || '24', 10),
pitch_change_sem: parseInt(process.env.TTS_PITCH || '0', 10),
},
};
@@ -17,16 +27,44 @@ async function ttsCommand(interaction: ChatInputCommandInteraction) {
const pitch = interaction.options.getInteger('pitch') ?? config.ttsSettings.pitch_change_sem;
const instruct = interaction.options.getString('instruct');
await interaction.reply(`generating audio for "${text}"...`);
// Pick a random loading emoji and phrase for this generation
const loadingEmoji = getRandomLoadingEmoji();
const loadingPhrase = getRandomKawaiiPhrase();
// Initial loading embed
const loadingEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
`Generating audio for: "${text}"`
);
await interaction.reply({ embeds: [loadingEmbed] });
try {
const audio = await requestTTSResponse(text, speaker, pitch, instruct);
const audioBuf = await audio.arrayBuffer();
const audioFile = new AttachmentBuilder(Buffer.from(audioBuf)).setName('mikuified.wav');
// Final embed with the TTS result
const finalEmbed = new EmbedBuilder()
.setColor(MIKU_COLOR)
.setAuthor({ name: 'Miku speaks:' })
.setDescription(text)
.setFooter({
text: `Voice: ${speaker} | Pitch: ${pitch} semitones${instruct ? ` | ${instruct}` : ''}`,
})
.setTimestamp();
await interaction.editReply({
embeds: [finalEmbed],
files: [audioFile],
});
} catch (err) {
await interaction.editReply(`Error: ${err}`);
const errorEmbed = createStatusEmbed(
loadingEmoji,
loadingPhrase,
`Oops! Something went wrong... 😭\n\`${err}\``
);
await interaction.editReply({ embeds: [errorEmbed] });
logError(`Error while generating TTS: ${err}`);
}
}
@@ -42,7 +80,7 @@ export = {
.addIntegerOption((opt) =>
opt
.setName('pitch')
.setDescription('Pitch shift in semitones (default: 24)')
.setDescription('Pitch shift in semitones (default: 0)')
.setRequired(false)
)
.addStringOption((opt) =>

View File

@@ -0,0 +1,169 @@
import {
AttachmentBuilder,
ChatInputCommandInteraction,
EmbedBuilder,
SlashCommandBuilder,
} from 'discord.js';
import 'dotenv/config';
import { logError, logInfo } from '../../../logging';
import { requestTTSResponse } from '../../util';
import { LLMConfig } from '../types';
import { LLMProvider } from '../../provider/provider';
import {
createStatusEmbed,
getRandomLoadingEmoji,
getRandomKawaiiPhrase,
MIKU_COLOR,
} from '../helpers';
// Shape of the structured LLM reply used to drive TTS generation.
interface VoiceMessageResponse {
    // What Miku should say out loud
    message: string;
    // One-sentence tone/expression direction passed to the TTS engine
    instruct: string;
}
/**
 * /voicemsg — ask Miku something and get back a generated voice message.
 *
 * Flow: structured LLM request (spoken text + tone instruction) → TTS
 * synthesis using that instruction → final embed with the audio attached.
 * The status embed is edited in place as each stage progresses.
 */
async function voicemsgCommand(interaction: ChatInputCommandInteraction) {
    // The option is declared required, so the `true` overload narrows the
    // result to string instead of string | null.
    const text = interaction.options.getString('text', true);
    // Pick a random loading emoji and phrase for this generation
    const loadingEmoji = getRandomLoadingEmoji();
    const loadingPhrase = getRandomKawaiiPhrase();
    // Initial loading embed
    const loadingEmbed = createStatusEmbed(loadingEmoji, loadingPhrase, `Processing: "${text}"`);
    await interaction.reply({ embeds: [loadingEmbed] });
    try {
        // Get provider and config from client state. Optional calls: the old
        // `client.provider!()` pattern threw a TypeError when the accessor
        // was missing, so the availability check below could never run.
        const client = interaction.client as any;
        const provider: LLMProvider | undefined = client.provider?.();
        const llmconf: LLMConfig | undefined = client.llmconf?.();
        const sysprompt: string | undefined = client.sysprompt?.();
        if (!provider || !llmconf || !sysprompt) {
            throw new Error('LLM provider or configuration not initialized');
        }
        // Update status: querying LLM
        const thinkingEmbed = createStatusEmbed(
            loadingEmoji,
            loadingPhrase,
            'Asking Miku for her response...'
        );
        await interaction.editReply({ embeds: [thinkingEmbed] });
        // Request structured LLM response with message and instruct fields
        const structuredResponse = await requestVoiceMessageLLM(provider, text, sysprompt, llmconf);
        logInfo(
            `[voicemsg] LLM response: message="${structuredResponse.message}", instruct="${structuredResponse.instruct}"`
        );
        // Update status: generating TTS
        const ttsEmbed = createStatusEmbed(
            loadingEmoji,
            loadingPhrase,
            `Generating voice with: "${structuredResponse.instruct}"`
        );
        await interaction.editReply({ embeds: [ttsEmbed] });
        // Generate TTS with the instruct field
        const audio = await requestTTSResponse(
            structuredResponse.message,
            undefined, // use default speaker
            undefined, // use default pitch
            structuredResponse.instruct
        );
        const audioBuf = await audio.arrayBuffer();
        const audioFile = new AttachmentBuilder(Buffer.from(audioBuf)).setName('mikuified.wav');
        // Final embed with the voice message
        const finalEmbed = new EmbedBuilder()
            .setColor(MIKU_COLOR)
            .setAuthor({ name: 'Miku says:' })
            .setDescription(structuredResponse.message)
            .setFooter({ text: `Expression: ${structuredResponse.instruct}` })
            .setTimestamp();
        await interaction.editReply({
            embeds: [finalEmbed],
            files: [audioFile],
        });
    } catch (err) {
        // Surface the failure in the same status embed the user is watching.
        const errorEmbed = createStatusEmbed(
            loadingEmoji,
            loadingPhrase,
            `Oops! Something went wrong... 😭\n\`${err}\``
        );
        await interaction.editReply({ embeds: [errorEmbed] });
        logError(`[voicemsg] Error while generating voice message: ${err}`);
    }
}
/**
 * Request a structured LLM response with message and instruct fields.
 *
 * Uses the provider's native structured-output support when available
 * (OpenAI), otherwise prompts for JSON and parses the reply, tolerating
 * code fences and surrounding prose. Always returns a usable response:
 * on parse failure the raw reply becomes the message with a default tone.
 */
async function requestVoiceMessageLLM(
    provider: LLMProvider,
    userText: string,
    sysprompt: string,
    params: LLMConfig
): Promise<VoiceMessageResponse> {
    // Check if provider has structured output method (OpenAI-specific)
    if ('requestStructuredVoiceResponse' in provider) {
        return await (provider as any).requestStructuredVoiceResponse(userText, sysprompt, params);
    }
    // Fallback: use regular LLM response and parse JSON
    // This is a fallback for non-OpenAI providers
    const prompt = `You are Hatsune Miku. A user wants you to respond with a voice message.
User message: "${userText}"
Respond with a JSON object containing:
- "message": Your spoken response as Miku (keep it concise, 1-3 sentences)
- "instruct": A one-sentence instruction describing the expression/tone to use (e.g., "Speak cheerfully and energetically", "Whisper softly and sweetly")
Return ONLY valid JSON, no other text.`;
    const response = await provider.requestLLMResponse(
        [] as any, // Empty history for this specific prompt
        sysprompt + '\n\n' + prompt,
        params
    );
    // Parse JSON response. Models often wrap JSON in ``` fences or add prose
    // around it; stripping only fences (as before) still failed on prose, so
    // additionally extract the outermost {...} span before parsing.
    try {
        const stripped = response
            .replace(/```json\s*/g, '')
            .replace(/```\s*/g, '')
            .trim();
        const start = stripped.indexOf('{');
        const end = stripped.lastIndexOf('}');
        const candidate = start !== -1 && end > start ? stripped.slice(start, end + 1) : stripped;
        const parsed = JSON.parse(candidate);
        return {
            message: parsed.message || response,
            instruct: parsed.instruct || 'Speak in a friendly and enthusiastic tone',
        };
    } catch (parseErr) {
        logError(`[voicemsg] Failed to parse LLM JSON response: ${parseErr}`);
        // Fallback to default
        return {
            message: response,
            instruct: 'Speak in a friendly and enthusiastic tone',
        };
    }
}
// Slash-command definition: /voicemsg text:<message>
const voicemsgData = new SlashCommandBuilder()
  .setName('voicemsg')
  .setDescription('Say something to Miku and have her respond with a voice message!');
voicemsgData.addStringOption((textOption) =>
  textOption.setName('text').setDescription('Your message to Miku').setRequired(true)
);

const voicemsgExport = {
  data: voicemsgData,
  execute: voicemsgCommand,
};

// Exported both ways so ESM and CommonJS consumers resolve the same object.
export default voicemsgExport;
module.exports = voicemsgExport;

2126
discord/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -20,6 +20,7 @@
"devDependencies": {
"@types/jest": "^29.5.12",
"@types/node-fetch": "^2.6.11",
"c8": "^11.0.0",
"jest": "^29.7.0",
"prettier": "^3.5.3",
"ts-jest": "^29.1.2",
@@ -34,6 +35,8 @@
"format": "prettier --write .",
"format:check": "prettier --check .",
"test": "jest",
"test:watch": "jest --watch"
"test:watch": "jest --watch",
"test:coverage": "c8 jest",
"test:ci": "c8 jest"
}
}

View File

@@ -183,4 +183,57 @@ export class OpenAIProvider implements LLMProvider {
throw err;
}
}
/**
* Request a structured response for voice messages with message and instruct fields.
* Uses OpenAI's structured outputs via JSON mode.
*/
async requestStructuredVoiceResponse(
userText: string,
sysprompt: string,
params: LLMConfig
): Promise<{ message: string; instruct: string }> {
const prompt = `You are Hatsune Miku. A user wants you to respond with a voice message.
User message: "${userText}"
Respond with a JSON object containing:
- "message": Your spoken response as Miku (keep it concise, 1-3 sentences)
- "instruct": A one-sentence instruction describing the expression/tone to use (e.g., "Speak cheerfully and energetically", "Whisper softly and sweetly")
Return ONLY valid JSON, no other text.`;
logInfo(`[openai] Requesting structured voice response for: "${userText}"`);
try {
const response = await this.client.chat.completions.create({
model: this.model,
messages: [
{ role: 'system', content: sysprompt },
{ role: 'user', content: prompt },
],
temperature: params?.temperature || 0.7,
top_p: params?.top_p || 0.9,
max_tokens: params?.max_new_tokens || 256,
response_format: { type: 'json_object' },
});
let content = response.choices[0].message.content;
if (!content) {
throw new TypeError('OpenAI API returned no message.');
}
logInfo(`[openai] Structured API response: ${content}`);
// Parse and validate JSON response
const parsed = JSON.parse(content);
return {
message: parsed.message || 'Hello! I am Miku~ ♪',
instruct: parsed.instruct || 'Speak in a friendly and enthusiastic tone',
};
} catch (err) {
logError(`[openai] Structured API Error: ` + err);
throw err;
}
}
}

View File

@@ -222,64 +222,82 @@ async function serializeMessageHistory(m: Message): Promise<LLMDiscordMessage |
}
async function sync(guilds: GuildManager) {
const guild = await guilds.fetch(process.env.GUILD);
if (!guild) {
logError(`[bot] FATAL: guild ${guild.id} not found!`);
// Parse REACTION_GUILDS or fall back to GUILD for backwards compatibility
const guildsStr = process.env.REACTION_GUILDS || process.env.GUILD || '';
if (!guildsStr.trim()) {
logError('[bot] FATAL: No REACTION_GUILDS or GUILD configured!');
return 1;
}
logInfo(`[bot] Entered guild ${guild.id}`);
const channels = await guild.channels.fetch();
const textChannels = <Collection<string, GuildTextBasedChannel>>(
channels.filter((c) => c && 'messages' in c && c.isTextBased)
);
for (const [id, textChannel] of textChannels) {
logInfo(`[bot] Found text channel ${id}`);
const oldestMsg = await db.get<ScoreboardMessageRow>(
'SELECT * FROM messages WHERE guild = ? AND channel = ? ORDER BY id ASC LIMIT 1',
guild.id,
id
const guildIds = guildsStr
.split(',')
.map((id) => id.trim())
.filter((id) => id);
for (const guildId of guildIds) {
const guild = await guilds.fetch(guildId);
if (!guild) {
logError(`[bot] FATAL: guild ${guildId} not found!`);
continue;
}
logInfo(`[bot] Entered guild ${guild.id}`);
const channels = await guild.channels.fetch();
const textChannels = <Collection<string, GuildTextBasedChannel>>(
channels.filter((c) => c && 'messages' in c && c.isTextBased)
);
const newestMsg = await db.get<ScoreboardMessageRow>(
'SELECT * FROM messages WHERE guild = ? AND channel = ? ORDER BY id DESC LIMIT 1',
guild.id,
id
);
let before: string = oldestMsg && String(oldestMsg.id);
let after: string = newestMsg && String(newestMsg.id);
let messagesCount = 0;
let reactionsCount = 0;
let newMessagesBefore: Collection<string, Message<true>>;
let newMessagesAfter: Collection<string, Message<true>>;
try {
do {
newMessagesBefore = await textChannel.messages.fetch({ before, limit: 100 });
messagesCount += newMessagesBefore.size;
for (const [id, textChannel] of textChannels) {
logInfo(`[bot] Found text channel ${id}`);
const oldestMsg = await db.get<ScoreboardMessageRow>(
'SELECT * FROM messages WHERE guild = ? AND channel = ? ORDER BY id ASC LIMIT 1',
guild.id,
id
);
const newestMsg = await db.get<ScoreboardMessageRow>(
'SELECT * FROM messages WHERE guild = ? AND channel = ? ORDER BY id DESC LIMIT 1',
guild.id,
id
);
let before: string = oldestMsg && String(oldestMsg.id);
let after: string = newestMsg && String(newestMsg.id);
let messagesCount = 0;
let reactionsCount = 0;
let newMessagesBefore: Collection<string, Message<true>>;
let newMessagesAfter: Collection<string, Message<true>>;
try {
do {
newMessagesBefore = await textChannel.messages.fetch({ before, limit: 100 });
messagesCount += newMessagesBefore.size;
newMessagesAfter = await textChannel.messages.fetch({ after, limit: 100 });
messagesCount += newMessagesAfter.size;
logInfo(
`[bot] [${id}] Fetched ${messagesCount} messages (+${newMessagesBefore.size} older, ${newMessagesAfter.size} newer)`
);
newMessagesAfter = await textChannel.messages.fetch({ after, limit: 100 });
messagesCount += newMessagesAfter.size;
logInfo(
`[bot] [${id}] Fetched ${messagesCount} messages (+${newMessagesBefore.size} older, ${newMessagesAfter.size} newer)`
);
const reactions = newMessagesBefore
.flatMap<MessageReaction>((m) => m.reactions.cache)
.concat(newMessagesAfter.flatMap<MessageReaction>((m) => m.reactions.cache));
for (const [_, reaction] of reactions) {
await recordReaction(reaction);
}
reactionsCount += reactions.size;
logInfo(`[bot] [${id}] Recorded ${reactionsCount} reactions (+${reactions.size}).`);
const reactions = newMessagesBefore
.flatMap<MessageReaction>((m) => m.reactions.cache)
.concat(
newMessagesAfter.flatMap<MessageReaction>((m) => m.reactions.cache)
);
for (const [_, reaction] of reactions) {
await recordReaction(reaction);
}
reactionsCount += reactions.size;
logInfo(
`[bot] [${id}] Recorded ${reactionsCount} reactions (+${reactions.size}).`
);
if (newMessagesBefore.size > 0) {
before = newMessagesBefore.last().id;
}
if (newMessagesAfter.size > 0) {
after = newMessagesAfter.first().id;
}
} while (newMessagesBefore.size === 100 || newMessagesAfter.size === 100);
logInfo(`[bot] [${id}] Done.`);
} catch (err) {
logWarn(`[bot] [${id}] Failed to fetch messages and reactions: ${err}`);
if (newMessagesBefore.size > 0) {
before = newMessagesBefore.last().id;
}
if (newMessagesAfter.size > 0) {
after = newMessagesAfter.first().id;
}
} while (newMessagesBefore.size === 100 || newMessagesAfter.size === 100);
logInfo(`[bot] [${id}] Done.`);
} catch (err) {
logWarn(`[bot] [${id}] Failed to fetch messages and reactions: ${err}`);
}
}
}
}

View File

@@ -2,7 +2,8 @@
"compilerOptions": {
"module": "commonjs",
"target": "es2020",
"sourceMap": true
"sourceMap": true,
"skipLibCheck": true
},
"exclude": ["discord/node_modules", "discord/__tests__"]
}