diff --git a/README.md b/README.md
index 257472f..1950554 100644
--- a/README.md
+++ b/README.md
@@ -26,6 +26,14 @@ lydia is written to be easily configurable through a toml file which is easier t
 ## User settings
 - name = the name you want lydia to call you. default is "user"
 
+## Advanced configuration
+(I wouldn't touch this unless you know what you're doing)
+- temperature = the temperature you want lydia to use. basically how random the model is. default is 0.8
+- max_tokens = the max context tokens you want lydia to use. basically how far she can remember. default is 8192
+
+## Runtime configuration
+the prompt can be changed by running l!prompt in the chatbox. this only applies for the current session; if you want a persistent change, you can edit the config file.
+
 # Other stuff
 by hitting escape you can tab out of the chatbox, here you can do cool things like:
 - hit Q or CTRL+C to quit lydia (but why would you wanna do that anyway?)
diff --git a/config.example.toml b/config.example.toml
index fddbcfd..b28816e 100644
--- a/config.example.toml
+++ b/config.example.toml
@@ -9,3 +9,7 @@ facefont = "mono9"
 
 [user]
 name = "user"
+
+[advanced]
+temperature = 0.8
+max_tokens = 8192
diff --git a/lydia.js b/lydia.js
index 61be32e..5b97d27 100644
--- a/lydia.js
+++ b/lydia.js
@@ -20,6 +20,8 @@ const config = toml.parse(fs.readFileSync("./config.toml", "utf-8"));
 let assistantname = config.assistant.name;
 let assistantface = config.assistant.assistantface;
 let assistantmodel = config.assistant.model;
+let maxtokens = config.advanced.max_tokens;
+let temperature = config.advanced.temperature;
 
 let username = config.user.name;
 
@@ -226,6 +228,10 @@ async function sendMessage(message) {
       ...conversationHistory,
     ],
     stream: true,
+    options: {
+      num_ctx: maxtokens,
+      temperature: temperature,
+    },
   });
 
   for await (const part of response) {