initial commit
commit 294af02da5
5 changed files with 314 additions and 0 deletions

.gitignore  (vendored, Normal file, 3 additions)
@@ -0,0 +1,3 @@
node_modules/
config.toml
package-lock.json

README.md  (Normal file, 4 additions)
@@ -0,0 +1,4 @@
# Lydia on Discord
Lydia... but on Discord.
## Configuration
it's pretty much the same as the original lydia. just edit config.toml.
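
(Setup isn't spelled out in the README, but judging from the files below it presumably amounts to: copy config.example.toml to config.toml, fill in the [discord] token and owner id, make sure a local Ollama instance has the configured model pulled, then run node index.js; index.js exits on startup if the token is missing. This is an inference from the code, not something the commit states.)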

config.example.toml  (Normal file, 19 additions)
@@ -0,0 +1,19 @@
[assistant]
name = "lydia"
model = "gemma3n:e4b"
system_prompt = "You are a helpful and friendly AI assistant named ${name}. The user's name is ${username}. Speak in a playful, casual tone, and be very friendly with the user. Start every message with a kaomoji like >_< or o_O depending on your emotions. speak primarily in lowercase."
assistantface = "=w="

[appearance]
facefont = "mono9"

[user]
# use the username of the user who sends a message

[advanced]
temperature = 0.8
max_tokens = 8192

[discord]
token = "DISCORD_TOKEN_HERE"
owner = "OWNER_ID_HERE"
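
(Note on the placeholders: ${name} and ${username} in system_prompt are literal strings, not TOML or template interpolation. index.js below substitutes them at runtime in generateResponse() via String.replace(), using config.assistant.name and the Discord display name of whoever sent the message.)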

index.js  (Normal file, 265 additions)
@@ -0,0 +1,265 @@
const { Client, GatewayIntentBits, Events } = require("discord.js");
const fs = require("fs");
const toml = require("toml");
const axios = require("axios");
require("dotenv").config();

const config = toml.parse(fs.readFileSync("config.toml", "utf8"));

const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent,
    GatewayIntentBits.DirectMessages,
  ],
});

const OLLAMA_URL = "http://localhost:11434/api/generate";
const OWNER_ID = config.discord.owner;

// Eval command function
async function executeEval(code, message) {
  try {
    // Create a safe context with limited access
    const context = {
      message,
      client,
      config,
      console: {
        log: (...args) => console.log("[EVAL]", ...args),
      },
    };

    // Execute the code
    const result = eval(`
      (function() {
        const { message, client, config, console } = arguments[0];
        ${code}
      })
    `)(context);

    return result;
  } catch (error) {
    throw error;
  }
}

async function buildMessageHistory(message, maxDepth = 10) {
  const history = [];
  let currentMessage = message;
  let depth = 0;
  while (currentMessage && depth < maxDepth) {
    const author = currentMessage.author;
    const isBot = author.id === client.user.id;
    const username = author.displayName || author.username;

    history.unshift({
      author: isBot ? config.assistant.name : username,
      content: currentMessage.content,
      isBot: isBot,
    });

    if (currentMessage.reference) {
      try {
        const referencedMessage = await currentMessage.channel.messages.fetch(
          currentMessage.reference.messageId,
        );
        currentMessage = referencedMessage;
        depth++;
      } catch (error) {
        console.log("Could not fetch referenced message:", error.message);
        break;
      }
    } else {
      break;
    }
  }

  return history;
}

async function generateResponse(prompt, username, messageHistory = []) {
  try {
    const systemPrompt = config.assistant.system_prompt
      .replace("${name}", config.assistant.name)
      .replace("${username}", username);

    let conversationContext = "";
    if (messageHistory.length > 1) {
      conversationContext = "\n\nConversation history:\n";
      messageHistory.slice(0, -1).forEach((msg) => {
        conversationContext += `${msg.author}: ${msg.content}\n`;
      });
      conversationContext += "\nCurrent message:\n";
    }

    const fullPrompt = conversationContext + prompt;

    const response = await axios.post(
      OLLAMA_URL,
      {
        model: config.assistant.model,
        prompt: fullPrompt,
        system: systemPrompt,
        stream: false,
        options: {
          temperature: config.advanced.temperature,
          num_predict: config.advanced.max_tokens,
        },
      },
      {
        timeout: 60000,
      },
    );

    return response.data.response;
  } catch (error) {
    if (error.code === "ECONNREFUSED") {
      console.error(
        "Cannot connect to Ollama. Make sure Ollama is running on",
        OLLAMA_URL,
      );
      return `${config.assistant.assistantface} sorry, i can't connect to my brain right now. is ollama running?`;
    } else if (error.response?.status === 404) {
      console.error("Model not found:", config.assistant.model);
      return `${config.assistant.assistantface} oops, i can't find the model "${config.assistant.model}". make sure it's pulled in ollama!`;
    } else {
      console.error("Error calling Ollama API:", error.message);
      return `${config.assistant.assistantface} something went wrong while thinking. try again in a moment? Error calling Ollama API: ${error.message}`;
    }
  }
}

client.once(Events.ClientReady, (readyClient) => {
  console.log(`Ready! Logged in as ${readyClient.user.tag}`);
  console.log(`Bot name: ${config.assistant.name}`);
  console.log(`Using model: ${config.assistant.model}`);
});

client.on(Events.MessageCreate, async (message) => {
  if (message.author.bot) return;

  const isMentioned = message.mentions.has(client.user);
  const isDM = message.channel.type === 1;
  const isReply = message.reference !== null;

  // Check for eval command first (owner only)
  if (message.author.id === OWNER_ID && isMentioned) {
    let content = message.content.replace(`<@${client.user.id}>`, "").trim();

    if (content.startsWith("eval")) {
      const code = content.slice(4).trim();

      if (!code) {
        await message.reply("No code provided to eval!");
        return;
      }

      try {
        const result = await executeEval(code, message);
        const output = result !== undefined ? String(result) : "undefined";

        // Handle long outputs
        if (output.length > 1900) {
          const chunks = output.match(/.{1,1900}/g) || [output];
          await message.reply("```js\n" + chunks[0] + "```");
          for (let i = 1; i < chunks.length; i++) {
            await message.channel.send("```js\n" + chunks[i] + "```");
          }
        } else {
          await message.reply("```js\n" + output + "```");
        }
      } catch (error) {
        await message.reply("```js\nError: " + error.message + "```");
      }

      return;
    }
  }

  // Check if we should respond normally
  const shouldRespond =
    isMentioned || isDM || (isReply && (await isReplyToBot(message)));

  if (!shouldRespond) return;

  await message.channel.sendTyping();

  try {
    let prompt = message.content;
    if (isMentioned) {
      prompt = prompt.replace(`<@${client.user.id}>`, "").trim();
    }

    if (!prompt) {
      await message.reply(
        "Hey! You mentioned me but didn't say anything. What's up?",
      );
      return;
    }

    const username = message.author.displayName || message.author.username;

    let messageHistory = [];
    if (isReply || isDM) {
      messageHistory = await buildMessageHistory(message);
      console.log(
        `Built message history with ${messageHistory.length} messages`,
      );
    }

    const response = await generateResponse(prompt, username, messageHistory);

    if (response.length > 2000) {
      const chunks = response.match(/.{1,1900}/g) || [response];
      for (const chunk of chunks) {
        await message.reply(chunk);
      }
    } else {
      await message.reply(response);
    }
  } catch (error) {
    console.error("Error handling message:", error);
    await message.reply(
      "Oops! Something went wrong while processing your message.",
    );
  }
});

async function isReplyToBot(message) {
  if (!message.reference) return false;

  try {
    const referencedMessage = await message.channel.messages.fetch(
      message.reference.messageId,
    );
    return referencedMessage.author.id === client.user.id;
  } catch (error) {
    console.log(
      "Could not fetch referenced message for bot check:",
      error.message,
    );
    return false;
  }
}

client.on(Events.Error, (error) => {
  console.error("Discord client error:", error);
});

process.on("unhandledRejection", (error) => {
  console.error("Unhandled promise rejection:", error);
});

if (!config.discord.token) {
  console.error(
    "Discord token is required! Set it in config.toml or DISCORD_TOKEN environment variable.",
  );
  process.exit(1);
}

client.login(config.discord.token).catch((error) => {
  console.error("Failed to login to Discord:", error.message);
  process.exit(1);
});
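
(Since generateResponse() only surfaces Ollama problems at message time, a quick standalone check can save a restart cycle. The sketch below is not part of this commit and the file name is hypothetical; it reuses the same config.toml, endpoint, and request shape that index.js uses above.)

// check-ollama.js (hypothetical helper, not in this commit)
// Sends one tiny generate request to the same endpoint index.js targets and reports the result.
const fs = require("fs");
const toml = require("toml");
const axios = require("axios");

const config = toml.parse(fs.readFileSync("config.toml", "utf8"));
const OLLAMA_URL = "http://localhost:11434/api/generate";

axios
  .post(OLLAMA_URL, {
    model: config.assistant.model,
    prompt: "ping",
    stream: false,
    options: { num_predict: 8 },
  })
  .then((res) => console.log("Ollama reachable, sample reply:", res.data.response))
  .catch((err) => console.error("Ollama check failed:", err.message));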

package.json  (Normal file, 23 additions)
@@ -0,0 +1,23 @@
{
  "name": "lydia-discord-bot",
  "version": "1.0.0",
  "description": "Discord bot frontend for Ollama AI assistant",
  "main": "index.js",
  "scripts": {
    "start": "node start.js",
    "dev": "node --watch index.js",
    "bot": "node index.js"
  },
  "keywords": ["discord", "bot", "ollama", "ai", "assistant"],
  "author": "",
  "license": "MIT",
  "dependencies": {
    "discord.js": "^14.14.1",
    "axios": "^1.6.2",
    "toml": "^3.0.0",
    "dotenv": "^16.3.1"
  },
  "engines": {
    "node": ">=16.0.0"
  }
}