From 278736ec4eef5cfe2759a55d3c8a1da68353eec1 Mon Sep 17 00:00:00 2001
From: Michaël Ball
Date: Thu, 5 Dec 2024 16:11:40 +0000
Subject: Updates for gen.nvim

---
 neovim/.config/nvim/lua/plugins/gen_nvim.lua | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/neovim/.config/nvim/lua/plugins/gen_nvim.lua b/neovim/.config/nvim/lua/plugins/gen_nvim.lua
index 074f863..52dd67d 100644
--- a/neovim/.config/nvim/lua/plugins/gen_nvim.lua
+++ b/neovim/.config/nvim/lua/plugins/gen_nvim.lua
@@ -3,18 +3,18 @@ return {
     "David-Kunz/gen.nvim",
     cmd = "Gen",
     opts = {
-      model = "llama3.2:3b-instruct-fp16", -- The default model to use.
-      quit_map = "q", -- set keymap to close the response window
-      retry_map = "<c-r>", -- set keymap to re-send the current prompt
-      accept_map = "<c-cr>", -- set keymap to replace the previous selection with the last result
-      host = "localhost", -- The host running the Ollama service.
-      port = "11434", -- The port on which the Ollama service is listening.
-      display_mode = "split", -- The display mode. Can be "float" or "split" or "horizontal-split".
-      show_prompt = false, -- Shows the prompt submitted to Ollama.
-      show_model = true, -- Displays which model you are using at the beginning of your chat session.
-      no_auto_close = false, -- Never closes the window automatically.
-      file = false, -- Write the payload to a temporary file to keep the command short.
-      hidden = false, -- Hide the generation window (if true, will implicitly set `prompt.replace = true`), requires Neovim >= 0.10
+      model = "llama3.2:3b-instruct-fp16", -- The default model to use.
+      quit_map = "q",                      -- set keymap to close the response window
+      retry_map = "<c-r>",                 -- set keymap to re-send the current prompt
+      accept_map = "<c-cr>",               -- set keymap to replace the previous selection with the last result
+      host = "localhost",                  -- The host running the Ollama service.
+      port = "11434",                      -- The port on which the Ollama service is listening.
+      display_mode = "split",              -- The display mode. Can be "float" or "split" or "horizontal-split".
+      show_prompt = false,                 -- Shows the prompt submitted to Ollama.
+      show_model = true,                   -- Displays which model you are using at the beginning of your chat session.
+      no_auto_close = false,               -- Never closes the window automatically.
+      file = false,                        -- Write the payload to a temporary file to keep the command short.
+      hidden = false,                      -- Hide the generation window (if true, will implicitly set `prompt.replace = true`), requires Neovim >= 0.10
       init = function(options) pcall(io.popen, "ollama serve > /dev/null 2>&1 &") end, -- Function to initialize Ollama
       command = function(options)
@@ -26,7 +26,7 @@ return {
       -- The executed command must return a JSON object with { response, context }
       -- (context property is optional).
       -- list_models = '', -- Retrieves a list of model names
-      debug = false -- Prints errors and the command which is run.
+      debug = false                        -- Prints errors and the command which is run.
     },
   },
 }
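
Note: the first hunk's context ends where `command = function(options)` opens, so the body of `command` is elided from this patch. For reference, a minimal sketch of that field follows, adapted from the default documented in the gen.nvim README (an assumption about the elided body, not code from this repository):

      -- Sketch adapted from the gen.nvim README, not this repo's elided code:
      -- the returned string is executed as a shell command, and gen.nvim
      -- replaces $body with the JSON request payload. The command's output
      -- must be JSON with { response, context }, per the second hunk above.
      command = function(options)
        return "curl --silent --no-buffer -X POST http://"
            .. options.host .. ":" .. options.port .. "/api/chat -d $body"
      end,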