From b1736352fdcf7c5aae6d32e2eb12e4f3e46dba8b Mon Sep 17 00:00:00 2001
From: Michaël Ball <michael.ball@krotosaudio.com>
Date: Thu, 5 Dec 2024 12:17:42 +0000
Subject: Add gen.nvim
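
Lazy-load David-Kunz/gen.nvim on the :Gen command and point it at a
local Ollama instance on localhost:11434, using
llama3.2:3b-instruct-fp16 as the default model. Responses open in a
split, and the active model is shown at the start of each chat
session.

The command option shells out to curl against Ollama's /api/chat
endpoint; gen.nvim substitutes the shell-escaped $body placeholder at
run time. With these defaults the expanded command should look roughly
like this (the messages payload is illustrative; it is assembled by
gen.nvim from the prompt):

    curl --silent --no-buffer -X POST http://localhost:11434/api/chat \
      -d '{"model": "llama3.2:3b-instruct-fp16", "stream": true,
           "messages": [{"role": "user", "content": "..."}]}'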

---
 neovim/.config/nvim/lua/plugins/gen_nvim.lua | 32 ++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 neovim/.config/nvim/lua/plugins/gen_nvim.lua

diff --git a/neovim/.config/nvim/lua/plugins/gen_nvim.lua b/neovim/.config/nvim/lua/plugins/gen_nvim.lua
new file mode 100644
index 0000000..074f863
--- /dev/null
+++ b/neovim/.config/nvim/lua/plugins/gen_nvim.lua
@@ -0,0 +1,32 @@
+return {
+  {
+    "David-Kunz/gen.nvim",
+    cmd = "Gen",
+    opts = {
+      model = "llama3.2:3b-instruct-fp16",        -- The default model to use.
+      quit_map = "q",           -- Keymap to close the response window.
+      retry_map = "<c-r>",      -- Keymap to re-send the current prompt.
+      accept_map = "<c-cr>",    -- Keymap to replace the previous selection with the last result.
+      host = "localhost",       -- The host running the Ollama service.
+      port = "11434",           -- The port on which the Ollama service is listening.
+      display_mode = "split",   -- The display mode: "float", "split" or "horizontal-split".
+      show_prompt = false,      -- Shows the prompt submitted to Ollama.
+      show_model = true,        -- Displays which model you are using at the beginning of your chat session.
+      no_auto_close = false,    -- Never closes the window automatically.
+      file = false,             -- Write the payload to a temporary file to keep the command short.
+      hidden = false,           -- Hide the generation window (if true, implicitly sets `prompt.replace = true`; requires Neovim >= 0.10).
+      -- Function to initialize Ollama.
+      init = function(options) pcall(io.popen, "ollama serve > /dev/null 2>&1 &") end,
+      -- The command for the Ollama service. You can use the placeholders $prompt, $model and $body (shell-escaped).
+      -- This can also be a command string.
+      -- The executed command must return a JSON object with { response, context }
+      -- (the context property is optional).
+      command = function(options)
+        local body = { model = options.model, stream = true }
+        return "curl --silent --no-buffer -X POST http://" .. options.host .. ":" .. options.port .. "/api/chat -d $body"
+      end,
+      -- list_models = '<omitted lua function>', -- Retrieves a list of model names.
+      debug = false,            -- Prints errors and the command which is run.
+    },
+  },
+}