Lua code for running GPT from Neovim
-- Put this in after/plugins
local api = vim.api

-- Split the buffer text into OpenAI chat messages. Sections are separated
-- by "@@@"; the first character of a section picks the role (s/u/a) and the
-- content starts at the third character.
local function chunk(text)
  local sections = {}
  for line in vim.gsplit(text, "@@@") do
    if line:find("^s") then
      table.insert(sections, {role = "system", content = line:sub(3)})
    elseif line:find("^u") then
      table.insert(sections, {role = "user", content = line:sub(3)})
    elseif line:find("^a") then
      table.insert(sections, {role = "assistant", content = line:sub(3)})
    end
  end
  return sections
end
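
-- For illustration, a buffer like the following (format derived from the
-- chunker above; the actual prompt text is made up):
--   s You are a terse assistant@@@u Explain coroutines in one sentence
-- chunks into:
--   { {role = "system", content = "You are a terse assistant"},
--     {role = "user",   content = "Explain coroutines in one sentence"} }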

-- Build and run an `openai api chat_completions.create` CLI call for the
-- chunked messages, returning its stdout.
local function send_to_openai(content, model)
  local messages = chunk(content)
  -- todo allow param extraction
  -- probably with vim.b.temperature or 0.7
  -- OR by returning info from the chunker
  local max_tokens = vim.b.max_tokens or 256
  local data = {
    model = model
    , messages = messages
    , max_tokens = max_tokens
    , n = 1
    , temperature = 0.7 -- note: not passed to the CLI yet (see todo above)
  }
  -- system() runs a list directly without a shell, and the list items must
  -- be strings, so the numeric arguments are stringified.
  local query = {"openai", "api", "chat_completions.create",
    "-m", data.model,
    "-M", tostring(data.max_tokens),
    "-n", tostring(data.n)}
  for _, v in ipairs(messages) do
    vim.list_extend(query, {"-g", v.role, v.content})
  end
  return vim.fn.system(query)
end
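
-- For the example buffer above, the resulting call is roughly equivalent to
-- (shell quoting shown for readability; system() takes the argument list
-- directly, with no shell involved):
--   openai api chat_completions.create -m gpt-3.5-turbo -M 256 -n 1 \
--     -g system "You are a terse assistant" \
--     -g user "Explain coroutines in one sentence"
-- This assumes the legacy `openai` Python CLI, which provides these flags.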

-- Write the result into a scratch buffer (tracked per source buffer in
-- b:ai_buffer) and open it in a split if it isn't already visible.
local function output(result)
  if not vim.b.ai_buffer then
    vim.b.ai_buffer = api.nvim_create_buf(true, true)
    -- TODO change the name of the buffer
  end
  api.nvim_buf_set_lines(vim.b.ai_buffer, 0, -1, false, vim.split(result, "\n"))
  if vim.fn.bufwinnr(vim.b.ai_buffer) == -1 then
    vim.cmd.sbuffer(vim.b.ai_buffer)
  end
end

-- Entry point: send the whole current buffer to the given model
-- (4 for gpt-4, anything else for gpt-3.5-turbo) and show the reply.
function GPT(model)
  if model == 4 then
    model = "gpt-4"
  else
    model = "gpt-3.5-turbo"
  end
  local top_bufnr = api.nvim_get_current_buf()
  local content = table.concat(api.nvim_buf_get_lines(top_bufnr, 0, -1, false), "\n")
  output(send_to_openai(content, model))
end

-- Print the chunked messages for the current buffer without calling the API.
function DebugGPT()
  local top_bufnr = api.nvim_get_current_buf()
  local content = table.concat(api.nvim_buf_get_lines(top_bufnr, 0, -1, false), "\n")
  print(vim.inspect(chunk(content)))
end
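
-- Example wiring, a minimal sketch (the command names here are made up;
-- adjust to taste):
vim.api.nvim_create_user_command("GPT3", function() GPT(3) end, {})
vim.api.nvim_create_user_command("GPT4", function() GPT(4) end, {})
vim.api.nvim_create_user_command("GPTDebug", DebugGPT, {})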