llama.vim : add classic vim support (#9995)

* added classic vim support

* fixed ring update, removed blank line

* minor

* minor

* minor doc update

* removed unneeded var

* minor

* minor

* fixed job_start creating new scratch buffers

* fixed job_start creating new scratch buffers

* fixed ghost text indenting when expandtab is on

* removed unused code

* minor

* unified fim_on_exit

* minor

* vim ghost text rendering now uses pos_x and pos_y parameters

* renamed *_hlgroup to hlgroup_*

* renamed *_ghost_text to ghost_text_*, moved nvim/vim detection to llama#init()

* minor

---------

Co-authored-by: Michael Coppola <info@michaeljcoppola.com>
Michael Coppola 2024-10-23 07:09:26 -04:00 committed by GitHub
parent 4c9388fb96
commit ac113a0fee


@@ -2,7 +2,7 @@
 "
 " requires:
 "
-" - neovim
+" - neovim or vim
 " - curl
 " - llama.cpp server instance
 " - FIM-compatible model
@@ -10,7 +10,7 @@
 " sample config:
 "
 " - Tab - accept the current suggestion
-" - Shift+Tab - accept just the first line of the segguestion
+" - Shift+Tab - accept just the first line of the suggestion
 " - Ctrl+F - toggle FIM completion manually
 "
 " make symlink or copy this file to ~/.config/nvim/autoload/llama.vim
@@ -43,8 +43,8 @@
 "
 " colors (adjust to your liking)
-highlight llama_hl_hint guifg=#ff772f
-highlight llama_hl_info guifg=#77ff2f
+highlight llama_hl_hint guifg=#ff772f ctermfg=202
+highlight llama_hl_info guifg=#77ff2f ctermfg=119
 
 " general parameters:
 "
@@ -93,6 +93,18 @@ let s:default_config = {
 let g:llama_config = get(g:, 'llama_config', s:default_config)
 
+function! s:get_indent(str)
+    let l:count = 0
+    for i in range(len(a:str))
+        if a:str[i] == "\t"
+            let l:count += &tabstop - 1
+        else
+            break
+        endif
+    endfor
+    return l:count
+endfunction
+
 function! s:rand(i0, i1) abort
     return a:i0 + rand() % (a:i1 - a:i0 + 1)
 endfunction
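
Note: the new s:get_indent() helper measures the extra screen cells occupied by a line's leading tabs (each tab fills &tabstop cells but counts as one character, hence &tabstop - 1 per tab); the vim ghost-text path later feeds this to 'text_padding_left' so virtual lines align with tab-indented code. A quick illustration, assuming 'tabstop' is 4 and calling from within the same script:

    " hypothetical call from inside the script, with tabstop=4
    set tabstop=4
    echo s:get_indent("\t\tfoo")    " 2 tabs * (4 - 1) -> 6 extra cells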
@@ -129,6 +141,21 @@ function! llama#init()
     let s:current_job = v:null
 
+    let s:ghost_text_nvim = exists('*nvim_buf_get_mark')
+    let s:ghost_text_vim = has('textprop')
+
+    if s:ghost_text_vim
+        let s:hlgroup_hint = 'llama_hl_hint'
+        let s:hlgroup_info = 'llama_hl_info'
+
+        if empty(prop_type_get(s:hlgroup_hint))
+            call prop_type_add(s:hlgroup_hint, {'highlight': s:hlgroup_hint})
+        endif
+        if empty(prop_type_get(s:hlgroup_info))
+            call prop_type_add(s:hlgroup_info, {'highlight': s:hlgroup_info})
+        endif
+    endif
+
     augroup llama
         autocmd!
         autocmd InsertEnter * inoremap <expr> <silent> <C-F> llama#fim_inline(v:false)
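
Note: detection here keys off capabilities rather than editor names: exists('*nvim_buf_get_mark') is true only under neovim, has('textprop') only in a Vim built with text properties. Vim requires a property type, tied to a highlight group, to be registered before prop_add() may reference it, and registering the same type twice raises E969, hence the prop_type_get() guards. A minimal standalone sketch of the same registration (the demo names are made up):

    if has('textprop')
        highlight DemoHint guifg=#888888
        if empty(prop_type_get('demo_hint'))
            call prop_type_add('demo_hint', {'highlight': 'DemoHint'})
        endif
    endif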
@@ -317,13 +344,22 @@ function! s:ring_update()
         \ 't_max_predict_ms': 1
         \ })
 
-    let l:curl_command = printf(
-        \ "curl --silent --no-buffer --request POST --url %s --header \"Content-Type: application/json\" --data %s",
-        \ g:llama_config.endpoint, shellescape(l:request)
-        \ )
+    let l:curl_command = [
+        \ "curl",
+        \ "--silent",
+        \ "--no-buffer",
+        \ "--request", "POST",
+        \ "--url", g:llama_config.endpoint,
+        \ "--header", "Content-Type: application/json",
+        \ "--data", l:request
+        \ ]
 
     " no callbacks because we don't need to process the response
-    call jobstart(l:curl_command, {})
+    if s:ghost_text_nvim
+        call jobstart(l:curl_command, {})
+    elseif s:ghost_text_vim
+        call job_start(l:curl_command, {})
+    endif
 endfunction
 
 " necessary for 'inoremap <expr>'
@@ -418,24 +454,37 @@ function! llama#fim(is_auto) abort
         \ 't_max_predict_ms': g:llama_config.t_max_predict_ms
         \ })
 
-    let l:curl_command = printf(
-        \ "curl --silent --no-buffer --request POST --url %s --header \"Content-Type: application/json\" --data %s",
-        \ g:llama_config.endpoint, shellescape(l:request)
-        \ )
+    let l:curl_command = [
+        \ "curl",
+        \ "--silent",
+        \ "--no-buffer",
+        \ "--request", "POST",
+        \ "--url", g:llama_config.endpoint,
+        \ "--header", "Content-Type: application/json",
+        \ "--data", l:request
+        \ ]
 
     if s:current_job != v:null
-        call jobstop(s:current_job)
+        if s:ghost_text_nvim
+            call jobstop(s:current_job)
+        elseif s:ghost_text_vim
+            call job_stop(s:current_job)
+        endif
     endif
 
     " send the request asynchronously
-    let s:current_job = jobstart(l:curl_command, {
-        \ 'on_stdout': function('s:fim_on_stdout'),
-        \ 'on_exit': function('s:fim_on_exit'),
-        \ 'stdout_buffered': v:true,
-        \ 'pos_x': s:pos_x,
-        \ 'pos_y': s:pos_y,
-        \ 'is_auto': a:is_auto
-        \ })
+    if s:ghost_text_nvim
+        let s:current_job = jobstart(l:curl_command, {
+            \ 'on_stdout': function('s:fim_on_stdout', [s:pos_x, s:pos_y, a:is_auto]),
+            \ 'on_exit': function('s:fim_on_exit'),
+            \ 'stdout_buffered': v:true
+            \ })
+    elseif s:ghost_text_vim
+        let s:current_job = job_start(l:curl_command, {
+            \ 'out_cb': function('s:fim_on_stdout', [s:pos_x, s:pos_y, a:is_auto]),
+            \ 'exit_cb': function('s:fim_on_exit')
+            \ })
+    endif
 
     " TODO: per-file location
     let l:delta_y = abs(s:pos_y - s:pos_y_pick)
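
Note: the old code smuggled pos_x/pos_y/is_auto to the callback through extra keys in the jobstart() options dict, read back via self in a dict function; that trick is neovim-specific. function('name', [args]) partials, which both editors support, bind those values as leading arguments instead, freezing the cursor position at request time so a late response can be checked against where the cursor is now. A sketch of the binding (the Demo name is made up; the optional parameter default needs Vim 8.1.1310+ or a recent neovim):

    function! s:DemoCb(pos_x, pos_y, is_auto, job_id, data, event = v:null)
        echom printf('reply for %d:%d (auto=%d)', a:pos_y, a:pos_x, a:is_auto)
    endfunction

    " the arglist is prepended at call time, so the same function serves
    " nvim's (job_id, data, event) and vim's (channel, msg) conventions
    let l:Cb = function('s:DemoCb', [col('.') - 1, line('.'), v:false])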
@@ -482,9 +531,13 @@ function! llama#fim_cancel()
     " clear the virtual text
     let l:bufnr = bufnr('%')
 
-    let l:id_vt_fim = nvim_create_namespace('vt_fim')
-    call nvim_buf_clear_namespace(l:bufnr, l:id_vt_fim, 0, -1)
+    if s:ghost_text_nvim
+        let l:id_vt_fim = nvim_create_namespace('vt_fim')
+        call nvim_buf_clear_namespace(l:bufnr, l:id_vt_fim, 0, -1)
+    elseif s:ghost_text_vim
+        call prop_remove({'type': s:hlgroup_hint, 'all': v:true})
+        call prop_remove({'type': s:hlgroup_info, 'all': v:true})
+    endif
 
     " remove the mappings
     silent! iunmap <buffer> <Tab>
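
Note: teardown is symmetric with rendering: neovim groups its extmarks under a named namespace and clears them in one call, while vim removes every property of each registered type; 'all': v:true with no line range sweeps the entire buffer. Side-by-side sketch (the demo names are made up):

    if has('nvim')
        call nvim_buf_clear_namespace(0, nvim_create_namespace('demo_ns'), 0, -1)
    else
        call prop_remove({'type': 'demo_hint', 'all': v:true})
    endif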
@@ -499,13 +552,18 @@ function! s:on_move()
 endfunction
 
 " callback that processes the FIM result from the server and displays the suggestion
-function! s:fim_on_stdout(job_id, data, event) dict
-    let l:raw = join(a:data, "\n")
+function! s:fim_on_stdout(pos_x, pos_y, is_auto, job_id, data, event = v:null)
+    if s:ghost_text_nvim
+        let l:raw = join(a:data, "\n")
+    elseif s:ghost_text_vim
+        let l:raw = a:data
+    endif
+
     if len(l:raw) == 0
         return
     endif
 
-    if self.pos_x != col('.') - 1 || self.pos_y != line('.')
+    if a:pos_x != col('.') - 1 || a:pos_y != line('.')
         return
     endif
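
Note: the two job APIs hand the callback differently shaped payloads: with 'stdout_buffered', neovim delivers a list of output lines, while vim's out_cb delivers a plain string, hence the join() on one branch and direct use on the other. A normalizing sketch of the same idea (the Demo name is made up):

    function! s:DemoNormalize(data) abort
        if type(a:data) == v:t_list
            return join(a:data, "\n")    " nvim: list of lines
        endif
        return a:data                    " vim: already a single string
    endfunction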
@@ -514,14 +572,14 @@ function! s:fim_on_stdout(job_id, data, event) dict
         return
     endif
 
-    let s:pos_x = self.pos_x
-    let s:pos_y = self.pos_y
+    let s:pos_x = a:pos_x
+    let s:pos_y = a:pos_y
 
     let s:can_accept = v:true
     let l:has_info = v:false
 
     if s:can_accept && v:shell_error
-        if !self.is_auto
+        if !a:is_auto
             call add(s:content, "<| curl error: is the server on? |>")
         endif
         let s:can_accept = v:false
@@ -642,7 +700,9 @@ function! s:fim_on_stdout(job_id, data, event) dict
     " display virtual text with the suggestion
     let l:bufnr = bufnr('%')
 
-    let l:id_vt_fim = nvim_create_namespace('vt_fim')
+    if s:ghost_text_nvim
+        let l:id_vt_fim = nvim_create_namespace('vt_fim')
+    endif
 
     " construct the info message
     if g:llama_config.show_info > 0 && l:has_info
@@ -671,6 +731,7 @@ function! s:fim_on_stdout(job_id, data, event) dict
     endif
 
     " display the suggestion and append the info to the end of the first line
+    if s:ghost_text_nvim
     call nvim_buf_set_extmark(l:bufnr, l:id_vt_fim, s:pos_y - 1, s:pos_x - 1, {
         \ 'virt_text': [[s:content[0], 'llama_hl_hint'], [l:info, 'llama_hl_info']],
         \ 'virt_text_win_col': virtcol('.') - 1
@@ -680,6 +741,31 @@ function! s:fim_on_stdout(job_id, data, event) dict
         \ 'virt_lines': map(s:content[1:], {idx, val -> [[val, 'llama_hl_hint']]}),
         \ 'virt_text_win_col': virtcol('.')
         \ })
+    elseif s:ghost_text_vim
+        let l:new_suffix = s:content[0]
+        if !empty(l:new_suffix)
+            call prop_add(s:pos_y, s:pos_x + 1, {
+                \ 'type': s:hlgroup_hint,
+                \ 'text': l:new_suffix
+                \ })
+        endif
+        for line in s:content[1:]
+            call prop_add(s:pos_y, 0, {
+                \ 'type': s:hlgroup_hint,
+                \ 'text': line,
+                \ 'text_padding_left': s:get_indent(line),
+                \ 'text_align': 'below'
+                \ })
+        endfor
+        if !empty(l:info)
+            call prop_add(s:pos_y, 0, {
+                \ 'type': s:hlgroup_info,
+                \ 'text': l:info,
+                \ 'text_padding_left': col('$'),
+                \ 'text_wrap': 'truncate'
+                \ })
+        endif
+    endif
 
     " setup accept shortcuts
     inoremap <buffer> <Tab> <C-O>:call llama#fim_accept(v:false)<CR>
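
Note: the vim rendering path mirrors neovim's extmarks with three prop_add() placements (Vim 9's text-property virtual text): the first suggestion line as inline text right of the cursor, each remaining line as a full virtual line below it ('text_align': 'below' requires column 0, padded by s:get_indent() so tab indentation lines up), and the info message pushed past end-of-line via col('$') and truncated rather than wrapped. Reduced sketch on the current line (the demo types assume a registration like the one in llama#init()):

    call prop_add(line('.'), col('.'), {'type': 'demo_hint', 'text': ' tail'})
    call prop_add(line('.'), 0, {
        \ 'type': 'demo_hint',
        \ 'text': 'virtual line below',
        \ 'text_align': 'below'
        \ })
    call prop_add(line('.'), 0, {
        \ 'type': 'demo_info',
        \ 'text': 'info',
        \ 'text_padding_left': col('$'),
        \ 'text_wrap': 'truncate'
        \ })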
@@ -688,7 +774,7 @@ function! s:fim_on_stdout(job_id, data, event) dict
     let s:hint_shown = v:true
 endfunction
 
-function! s:fim_on_exit(job_id, exit_code, event) dict
+function! s:fim_on_exit(job_id, exit_code, event = v:null)
     if a:exit_code != 0
         echom "Job failed with exit code: " . a:exit_code
     endif
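
Note: vim's exit_cb is invoked with only (job, status), while neovim's on_exit adds an event name as a third argument; giving the trailing parameter a default (supported since Vim 8.1.1310) lets one handler serve both conventions, and the now-unneeded dict modifier goes away. Sketch (the Demo name is made up):

    function! s:DemoOnExit(job_id, exit_code, event = v:null)
        " a:event is 'exit' under nvim, v:null under vim
        echom 'job finished with status ' . a:exit_code
    endfunction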