  1. " LLM-based text completion using llama.cpp
  2. "
  3. " requires:
  4. "
  5. " - neovim
  6. " - curl
  7. " - llama.cpp server instance
  8. " - FIM-compatible model
  9. "
  10. " sample config:
  11. "
  12. " - Tab - accept the current suggestion
  13. " - Shift+Tab - accept just the first line of the segguestion
  14. " - Ctrl+F - toggle FIM completion manually
  15. "
  16. " make symlink or copy this file to ~/.config/nvim/autoload/llama.vim
  17. "
  18. " start the llama.cpp server with a FIM-compatible model. for example:
  19. "
  20. " $ llama-server -m {model.gguf} --port 8012 -ngl 99 -fa -dt 0.1 --ubatch-size 512 --batch-size 1024 --cache-reuse 256
  21. "
  22. " --batch-size [512, model max context]
  23. "
  24. " adjust the batch size to control how much of the provided local context will be used during the inference
  25. " lower values will use smaller part of the context around the cursor, which will result in faster processing
  26. "
  27. " --ubatch-size [64, 2048]
  28. "
  29. " chunks the batch into smaller chunks for faster processing
  30. " depends on the specific hardware. use llama-bench to profile and determine the best size
  31. "
  32. " --cache-reuse (ge:llama_config.n_predict, 1024]
  33. "
  34. " this should be either 0 (disabled) or strictly larger than g:llama_config.n_predict
  35. " using non-zero value enables context reuse on the server side which dramatically improves the performance at
  36. " large contexts. a value of 256 should be good for all cases
  37. "
  38. " run this once to initialise llama.vim:
  39. "
  40. " :call llama#init()
  41. "
  42. " more info: https://github.com/ggerganov/llama.cpp/pull/9787
  43. "
  44. " colors (adjust to your liking)
  45. highlight llama_hl_hint guifg=#ff772f
  46. highlight llama_hl_info guifg=#77ff2f
  47. " general parameters:
  48. "
  49. " endpoint: llama.cpp server endpoint
  50. " n_prefix: number of lines before the cursor location to include in the local prefix
  51. " n_suffix: number of lines after the cursor location to include in the local suffix
  52. " n_predict: max number of tokens to predict
  53. " t_max_prompt_ms: max alloted time for the prompt processing (TODO: not yet supported)
  54. " t_max_predict_ms: max alloted time for the prediction
  55. " show_info: show extra info about the inference (0 - disabled, 1 - statusline, 2 - inline)
  56. " auto_fim: trigger FIM completion automatically on cursor movement
  57. " max_line_suffix: do not auto-trigger FIM completion if there are more than this number of characters to the right of the cursor
  58. "
  59. " ring buffer of chunks, accumulated with time upon:
  60. "
  61. " - completion request
  62. " - yank
  63. " - entering a buffer
  64. " - leaving a buffer
  65. " - writing a file
  66. "
  67. " parameters for the ring-buffer with extra context:
  68. "
  69. " ring_n_chunks: max number of chunks to pass as extra context to the server (0 to disable)
  70. " ring_chunk_size: max size of the chunks (in number of lines)
  71. " note: adjust these numbers so that you don't overrun your context
  72. " at ring_n_chunks = 64 and ring_chunk_size = 64 you need ~32k context
  73. " ring_scope: the range around the cursor position (in number of lines) for gathering chunks after FIM
  74. " ring_update_ms: how often to process queued chunks in normal mode
  75. "
let s:default_config = {
    \ 'endpoint':         'http://127.0.0.1:8012/infill',
    \ 'n_prefix':         256,
    \ 'n_suffix':         64,
    \ 'n_predict':        128,
    \ 't_max_prompt_ms':  500,
    \ 't_max_predict_ms': 1000,
    \ 'show_info':        2,
    \ 'auto_fim':         v:true,
    \ 'max_line_suffix':  8,
    \ 'ring_n_chunks':    64,
    \ 'ring_chunk_size':  64,
    \ 'ring_scope':       1024,
    \ 'ring_update_ms':   1000,
    \ }

let g:llama_config = get(g:, 'llama_config', s:default_config)

" return a random integer in the inclusive range [i0, i1]
function! s:rand(i0, i1) abort
    return a:i0 + rand() % (a:i1 - a:i0 + 1)
endfunction

function! llama#init()
    if !executable('curl')
        echohl WarningMsg
        echo 'llama.vim requires the "curl" command to be available'
        echohl None
        return
    endif

    let s:pos_x = 0 " cursor position upon start of completion
    let s:pos_y = 0

    let s:line_cur = ''

    let s:line_cur_prefix = ''
    let s:line_cur_suffix = ''

    let s:ring_chunks = [] " current set of chunks used as extra context
    let s:ring_queued = [] " chunks that are queued to be sent for processing
    let s:ring_n_evict = 0

    let s:hint_shown = v:false
    let s:pos_y_pick = -9999 " last y where we picked a chunk
    let s:pos_dx = 0
    let s:content = []
    let s:can_accept = v:false

    let s:timer_fim = -1
    let s:t_fim_start = reltime() " used to measure total FIM time
    let s:t_last_move = reltime() " last time the cursor moved

    let s:current_job = v:null

    augroup llama
        autocmd!
        autocmd InsertEnter     * inoremap <expr> <silent> <C-F> llama#fim_inline(v:false)
        autocmd InsertLeavePre  * call llama#fim_cancel()

        autocmd CursorMoved     * call s:on_move()
        autocmd CursorMovedI    * call s:on_move()
        autocmd CompleteChanged * call llama#fim_cancel()

        if g:llama_config.auto_fim
            autocmd CursorMovedI * call llama#fim(v:true)
        endif

        " gather chunks upon yanking
        autocmd TextYankPost * if v:event.operator ==# 'y' | call s:pick_chunk(v:event.regcontents, v:false, v:true) | endif

        " gather chunks upon entering/leaving a buffer
        autocmd BufEnter * call timer_start(100, {-> s:pick_chunk(getline(max([1, line('.') - g:llama_config.ring_chunk_size/2]), min([line('.') + g:llama_config.ring_chunk_size/2, line('$')])), v:true, v:true)})
        autocmd BufLeave *                        call s:pick_chunk(getline(max([1, line('.') - g:llama_config.ring_chunk_size/2]), min([line('.') + g:llama_config.ring_chunk_size/2, line('$')])), v:true, v:true)

        " gather chunk upon saving the file
        autocmd BufWritePost * call s:pick_chunk(getline(max([1, line('.') - g:llama_config.ring_chunk_size/2]), min([line('.') + g:llama_config.ring_chunk_size/2, line('$')])), v:true, v:true)
    augroup END

    silent! call llama#fim_cancel()

    " init background update of the ring buffer
    if g:llama_config.ring_n_chunks > 0
        call s:ring_update()
    endif
endfunction

  143. " compute how similar two chunks of text are
  144. " 0 - no similarity, 1 - high similarity
  145. " TODO: figure out something better
function! s:chunk_sim(c0, c1)
    let l:lines0 = len(a:c0)
    let l:lines1 = len(a:c1)

    let l:common = 0

    for l:line0 in a:c0
        for l:line1 in a:c1
            if l:line0 == l:line1
                let l:common += 1
                break
            endif
        endfor
    endfor

    return 2.0 * l:common / (l:lines0 + l:lines1)
endfunction

  160. " pick a random chunk of size g:llama_config.ring_chunk_size from the provided text and queue it for processing
  161. "
  162. " no_mod - do not pick chunks from buffers with pending changes
  163. " do_evict - evict chunks that are very similar to the new one
  164. "
  165. function! s:pick_chunk(text, no_mod, do_evict)
  166. " do not pick chunks from buffers with pending changes or buffers that are not files
  167. if a:no_mod && (getbufvar(bufnr('%'), '&modified') || !buflisted(bufnr('%')) || !filereadable(expand('%')))
  168. return
  169. endif
  170. " if the extra context option is disabled - do nothing
  171. if g:llama_config.ring_n_chunks <= 0
  172. return
  173. endif
  174. " don't pick very small chunks
  175. if len(a:text) < 3
  176. return
  177. endif
  178. if len(a:text) + 1 < g:llama_config.ring_chunk_size
  179. let l:chunk = a:text
  180. else
  181. let l:l0 = s:rand(0, max([0, len(a:text) - g:llama_config.ring_chunk_size/2]))
  182. let l:l1 = min([l:l0 + g:llama_config.ring_chunk_size/2, len(a:text)])
  183. let l:chunk = a:text[l:l0:l:l1]
  184. endif
  185. let l:chunk_str = join(l:chunk, "\n") . "\n"
  186. " check if this chunk is already added
  187. let l:exist = v:false
  188. for i in range(len(s:ring_chunks))
  189. if s:ring_chunks[i].data == l:chunk
  190. let l:exist = v:true
  191. break
  192. endif
  193. endfor
  194. for i in range(len(s:ring_queued))
  195. if s:ring_queued[i].data == l:chunk
  196. let l:exist = v:true
  197. break
  198. endif
  199. endfor
  200. if l:exist
  201. return
  202. endif
  203. " evict queued chunks that are very similar to the new one
  204. for i in range(len(s:ring_queued) - 1, 0, -1)
  205. if s:chunk_sim(s:ring_queued[i].data, l:chunk) > 0.9
  206. if a:do_evict
  207. call remove(s:ring_queued, i)
  208. let s:ring_n_evict += 1
  209. else
  210. return
  211. endif
  212. endif
  213. endfor
  214. " also from s:ring_chunks
  215. for i in range(len(s:ring_chunks) - 1, 0, -1)
  216. if s:chunk_sim(s:ring_chunks[i].data, l:chunk) > 0.9
  217. if a:do_evict
  218. call remove(s:ring_chunks, i)
  219. let s:ring_n_evict += 1
  220. else
  221. return
  222. endif
  223. endif
  224. endfor
  225. " TODO: become parameter ?
  226. if len(s:ring_queued) == 16
  227. call remove(s:ring_queued, 0)
  228. endif
  229. call add(s:ring_queued, {'data': l:chunk, 'str': l:chunk_str, 'time': reltime(), 'filename': expand('%')})
  230. "let &statusline = 'extra context: ' . len(s:ring_chunks) . ' / ' . len(s:ring_queued)
  231. endfunction
  232. " picks a queued chunk, sends it for processing and adds it to s:ring_chunks
  233. " called every g:llama_config.ring_update_ms
  234. function! s:ring_update()
  235. call timer_start(g:llama_config.ring_update_ms, {-> s:ring_update()})
  236. " update only if in normal mode or if the cursor hasn't moved for a while
  237. if mode() !=# 'n' && reltimefloat(reltime(s:t_last_move)) < 3.0
  238. return
  239. endif
  240. if len(s:ring_queued) == 0
  241. return
  242. endif
  243. " move the first queued chunk to the ring buffer
  244. if len(s:ring_chunks) == g:llama_config.ring_n_chunks
  245. call remove(s:ring_chunks, 0)
  246. endif
  247. call add(s:ring_chunks, remove(s:ring_queued, 0))
  248. "let &statusline = 'updated context: ' . len(s:ring_chunks) . ' / ' . len(s:ring_queued)
  249. " send asynchronous job with the new extra context so that it is ready for the next FIM
  250. let l:extra_context = []
  251. for l:chunk in s:ring_chunks
  252. call add(l:extra_context, {
  253. \ 'text': l:chunk.str,
  254. \ 'time': l:chunk.time,
  255. \ 'filename': l:chunk.filename
  256. \ })
  257. endfor
  258. " no samplers needed here
  259. let l:request = json_encode({
  260. \ 'input_prefix': "",
  261. \ 'input_suffix': "",
  262. \ 'input_extra': l:extra_context,
  263. \ 'prompt': "",
  264. \ 'n_predict': 1,
  265. \ 'temperature': 0.0,
  266. \ 'stream': v:false,
  267. \ 'samplers': ["temperature"],
  268. \ 'cache_prompt': v:true,
  269. \ 't_max_prompt_ms': 1,
  270. \ 't_max_predict_ms': 1
  271. \ })
  272. let l:curl_command = printf(
  273. \ "curl --silent --no-buffer --request POST --url %s --header \"Content-Type: application/json\" --data %s",
  274. \ g:llama_config.endpoint, shellescape(l:request)
  275. \ )
  276. " no callbacks because we don't need to process the response
  277. call jobstart(l:curl_command, {})
  278. endfunction
  279. " necessary for 'inoremap <expr>'
  280. function! llama#fim_inline(is_auto) abort
  281. call llama#fim(a:is_auto)
  282. return ''
  283. endfunction
  284. " the main FIM call
  285. " takes local context around the cursor and sends it together with the extra context to the server for completion
  286. function! llama#fim(is_auto) abort
  287. " we already have a suggestion for the current cursor position
  288. if s:hint_shown && !a:is_auto
  289. call llama#fim_cancel()
  290. return
  291. endif
  292. call llama#fim_cancel()
  293. " avoid sending repeated requests too fast
  294. if reltimefloat(reltime(s:t_fim_start)) < 0.6
  295. if s:timer_fim != -1
  296. call timer_stop(s:timer_fim)
  297. let s:timer_fim = -1
  298. endif
  299. let s:t_fim_start = reltime()
  300. let s:timer_fim = timer_start(600, {-> llama#fim(v:true)})
  301. return
  302. endif
  303. let s:t_fim_start = reltime()
  304. let s:content = []
  305. let s:can_accept = v:false
  306. let s:pos_x = col('.') - 1
  307. let s:pos_y = line('.')
  308. let l:max_y = line('$')
  309. let l:lines_prefix = getline(max([1, s:pos_y - g:llama_config.n_prefix]), s:pos_y - 1)
  310. let l:lines_suffix = getline(s:pos_y + 1, min([l:max_y, s:pos_y + g:llama_config.n_suffix]))
  311. let s:line_cur = getline('.')
  312. let s:line_cur_prefix = strpart(s:line_cur, 0, s:pos_x)
  313. let s:line_cur_suffix = strpart(s:line_cur, s:pos_x)
  314. if a:is_auto && len(s:line_cur_suffix) > g:llama_config.max_line_suffix
  315. return
  316. endif
  317. let l:prefix = ""
  318. \ . join(l:lines_prefix, "\n")
  319. \ . "\n"
  320. let l:prompt = ""
  321. \ . s:line_cur_prefix
  322. let l:suffix = ""
  323. \ . s:line_cur_suffix
  324. \ . "\n"
  325. \ . join(l:lines_suffix, "\n")
  326. \ . "\n"
  327. " prepare the extra context data
  328. let l:extra_context = []
  329. for l:chunk in s:ring_chunks
  330. call add(l:extra_context, {
  331. \ 'text': l:chunk.str,
  332. \ 'time': l:chunk.time,
  333. \ 'filename': l:chunk.filename
  334. \ })
  335. endfor
  336. " the indentation of the current line
  337. let l:indent = strlen(matchstr(s:line_cur_prefix, '^\s*'))
  338. let l:request = json_encode({
  339. \ 'input_prefix': l:prefix,
  340. \ 'input_suffix': l:suffix,
  341. \ 'input_extra': l:extra_context,
  342. \ 'prompt': l:prompt,
  343. \ 'n_predict': g:llama_config.n_predict,
  344. \ 'n_indent': l:indent,
  345. \ 'top_k': 40,
  346. \ 'top_p': 0.99,
  347. \ 'stream': v:false,
  348. \ 'samplers': ["top_k", "top_p", "infill"],
  349. \ 'cache_prompt': v:true,
  350. \ 't_max_prompt_ms': g:llama_config.t_max_prompt_ms,
  351. \ 't_max_predict_ms': g:llama_config.t_max_predict_ms
  352. \ })
  353. let l:curl_command = printf(
  354. \ "curl --silent --no-buffer --request POST --url %s --header \"Content-Type: application/json\" --data %s",
  355. \ g:llama_config.endpoint, shellescape(l:request)
  356. \ )
  357. if s:current_job != v:null
  358. call jobstop(s:current_job)
  359. endif
  360. " send the request asynchronously
  361. let s:current_job = jobstart(l:curl_command, {
  362. \ 'on_stdout': function('s:fim_on_stdout'),
  363. \ 'on_exit': function('s:fim_on_exit'),
  364. \ 'stdout_buffered': v:true,
  365. \ 'pos_x': s:pos_x,
  366. \ 'pos_y': s:pos_y,
  367. \ 'is_auto': a:is_auto
  368. \ })
  369. " TODO: per-file location
  370. let l:delta_y = abs(s:pos_y - s:pos_y_pick)
  371. " gather some extra context nearby and process it in the background
  372. " only gather chunks if the cursor has moved a lot
  373. " TODO: something more clever? reranking?
  374. if a:is_auto && l:delta_y > 32
  375. " expand the prefix even further
  376. call s:pick_chunk(getline(max([1, s:pos_y - g:llama_config.ring_scope]), max([1, s:pos_y - g:llama_config.n_prefix])), v:false, v:false)
  377. " pick a suffix chunk
  378. call s:pick_chunk(getline(min([l:max_y, s:pos_y + g:llama_config.n_suffix]), min([l:max_y, s:pos_y + g:llama_config.n_suffix + g:llama_config.ring_chunk_size])), v:false, v:false)
  379. let s:pos_y_pick = s:pos_y
  380. endif
  381. endfunction
  382. " if first_line == v:true accept only the first line of the response
  383. function! llama#fim_accept(first_line)
  384. " insert the suggestion at the cursor location
  385. if s:can_accept && len(s:content) > 0
  386. call setline(s:pos_y, s:line_cur[:(s:pos_x - 1)] . s:content[0])
  387. if len(s:content) > 1
  388. if !a:first_line
  389. call append(s:pos_y, s:content[1:-1])
  390. endif
  391. endif
  392. " move the cursor to the end of the accepted text
  393. if !a:first_line && len(s:content) > 1
  394. call cursor(s:pos_y + len(s:content) - 1, s:pos_x + s:pos_dx + 1)
  395. else
  396. call cursor(s:pos_y, s:pos_x + len(s:content[0]))
  397. endif
  398. endif
  399. call llama#fim_cancel()
  400. endfunction
function! llama#fim_cancel()
    let s:hint_shown = v:false

    " clear the virtual text
    let l:bufnr = bufnr('%')

    let l:id_vt_fim  = nvim_create_namespace('vt_fim')
    let l:id_vt_info = nvim_create_namespace('vt_info')

    call nvim_buf_clear_namespace(l:bufnr, l:id_vt_fim,  0, -1)
    call nvim_buf_clear_namespace(l:bufnr, l:id_vt_info, 0, -1)

    " remove the mappings
    silent! iunmap <buffer> <Tab>
    silent! iunmap <buffer> <S-Tab>
    silent! iunmap <buffer> <Esc>
endfunction

function! s:on_move()
    let s:t_last_move = reltime()

    call llama#fim_cancel()
endfunction

  418. " callback that processes the FIM result from the server and displays the suggestion
function! s:fim_on_stdout(job_id, data, event) dict
    let l:raw = join(a:data, "\n")
    if len(l:raw) == 0
        return
    endif

    " ignore the response if the cursor has moved in the meantime
    if self.pos_x != col('.') - 1 || self.pos_y != line('.')
        return
    endif

    " show the suggestion only in insert mode
    if mode() !=# 'i'
        return
    endif

    let s:pos_x = self.pos_x
    let s:pos_y = self.pos_y

    let s:can_accept = v:true
    let l:has_info   = v:false

    if s:can_accept && v:shell_error
        if !self.is_auto
            call add(s:content, "<| curl error: is the server on? |>")
        endif
        let s:can_accept = v:false
    endif

    let l:n_prompt    = 0
    let l:t_prompt_ms = 1.0
    let l:s_prompt    = 0

    let l:n_predict    = 0
    let l:t_predict_ms = 1.0
    let l:s_predict    = 0

    " get the generated suggestion
    if s:can_accept
        let l:response = json_decode(l:raw)

        for l:part in split(get(l:response, 'content', ''), "\n", 1)
            call add(s:content, l:part)
        endfor

        " remove trailing new lines
        while len(s:content) > 0 && s:content[-1] == ""
            call remove(s:content, -1)
        endwhile

        let l:generation_settings = get(l:response, 'generation_settings', {})
        let l:n_ctx = get(l:generation_settings, 'n_ctx', 0)

        let l:n_cached  = get(l:response, 'tokens_cached', 0)
        let l:truncated = get(l:response, 'truncated', v:false)

        " if response.timings is available
        if len(get(l:response, 'timings', {})) > 0
            let l:has_info = v:true
            let l:timings  = get(l:response, 'timings', {})

            let l:n_prompt    = get(l:timings, 'prompt_n', 0)
            let l:t_prompt_ms = get(l:timings, 'prompt_ms', 1)
            let l:s_prompt    = get(l:timings, 'prompt_per_second', 0)

            let l:n_predict    = get(l:timings, 'predicted_n', 0)
            let l:t_predict_ms = get(l:timings, 'predicted_ms', 1)
            let l:s_predict    = get(l:timings, 'predicted_per_second', 0)
        endif
    endif

    if len(s:content) == 0
        call add(s:content, "")
        let s:can_accept = v:false
    endif

    if len(s:content) == 0
        return
    endif

    " NOTE: the following is logic for discarding predictions that repeat existing text
    "       the code is quite ugly and there is very likely a simpler and more canonical way to implement this
    "
    "       still, I wonder if there is some better way that avoids having to do these special hacks?
    "       on one hand, the LLM 'sees' the contents of the file before we start editing, so it is normal that it would
    "       start generating whatever we have given it via the extra context. but on the other hand, it's not very
    "       helpful to re-generate the same code that is already there

    " truncate the suggestion if the first line is empty
    if len(s:content) == 1 && s:content[0] == ""
        let s:content = [""]
    endif

    " ... and the next lines are repeated
    if len(s:content) > 1 && s:content[0] == "" && s:content[1:] == getline(s:pos_y + 1, s:pos_y + len(s:content) - 1)
        let s:content = [""]
    endif

    " truncate the suggestion if it repeats the suffix
    if len(s:content) == 1 && s:content[0] == s:line_cur_suffix
        let s:content = [""]
    endif

    " find the first non-empty line (strip whitespace)
    let l:cmp_y = s:pos_y + 1
    while l:cmp_y < line('$') && getline(l:cmp_y) =~? '^\s*$'
        let l:cmp_y += 1
    endwhile

    if (s:line_cur_prefix . s:content[0]) == getline(l:cmp_y)
        " truncate the suggestion if it repeats the next line
        if len(s:content) == 1
            let s:content = [""]
        endif

        " ... or if the second line of the suggestion is the prefix of line l:cmp_y + 1
        if len(s:content) == 2 && s:content[-1] == getline(l:cmp_y + 1)[:len(s:content[-1]) - 1]
            let s:content = [""]
        endif

        " ... or if the middle chunk of lines of the suggestion is the same as [l:cmp_y + 1, l:cmp_y + len(s:content) - 1)
        if len(s:content) > 2 && join(s:content[1:-1], "\n") == join(getline(l:cmp_y + 1, l:cmp_y + len(s:content) - 1), "\n")
            let s:content = [""]
        endif
    endif

    " keep only lines that have the same or larger whitespace prefix as s:line_cur_prefix
    "let l:indent = strlen(matchstr(s:line_cur_prefix, '^\s*'))
    "for i in range(1, len(s:content) - 1)
    "    if strlen(matchstr(s:content[i], '^\s*')) < l:indent
    "        let s:content = s:content[:i - 1]
    "        break
    "    endif
    "endfor

    let s:pos_dx = len(s:content[-1])

    let s:content[-1] .= s:line_cur_suffix

    call llama#fim_cancel()

    " display virtual text with the suggestion
    let l:bufnr = bufnr('%')

    let l:id_vt_fim  = nvim_create_namespace('vt_fim')
    let l:id_vt_info = nvim_create_namespace('vt_info')

    " construct the info message
    if g:llama_config.show_info > 0 && l:has_info
        " prefix the info string with whitespace in order to offset it to the right of the fim overlay
        let l:prefix = repeat(' ', len(s:content[0]) - len(s:line_cur_suffix) + 3)

        if l:truncated
            let l:info = printf("%s | WARNING: the context is full: %d / %d, increase the server context size or reduce g:llama_config.ring_n_chunks",
                \ g:llama_config.show_info == 2 ? l:prefix : 'llama.vim',
                \ l:n_cached, l:n_ctx
                \ )
        else
            let l:info = printf("%s | c: %d / %d, r: %d / %d, e: %d, q: %d / 16 | p: %d (%.2f ms, %.2f t/s) | g: %d (%.2f ms, %.2f t/s) | t: %.2f ms",
                \ g:llama_config.show_info == 2 ? l:prefix : 'llama.vim',
                \ l:n_cached, l:n_ctx, len(s:ring_chunks), g:llama_config.ring_n_chunks, s:ring_n_evict, len(s:ring_queued),
                \ l:n_prompt, l:t_prompt_ms, l:s_prompt,
                \ l:n_predict, l:t_predict_ms, l:s_predict,
                \ 1000.0 * reltimefloat(reltime(s:t_fim_start))
                \ )
        endif

        if g:llama_config.show_info == 1
            " display it in the statusline
            let &statusline = l:info
        elseif g:llama_config.show_info == 2
            " display it to the right of the current line
            call nvim_buf_set_extmark(l:bufnr, l:id_vt_info, s:pos_y - 1, s:pos_x - 1, {
                \ 'virt_text': [[l:info, 'llama_hl_info']],
                \ 'virt_text_pos': 'eol',
                \ })
        endif
    endif

    " display the suggestion
    call nvim_buf_set_extmark(l:bufnr, l:id_vt_fim, s:pos_y - 1, s:pos_x - 1, {
        \ 'virt_text': [[s:content[0], 'llama_hl_hint']],
        \ 'virt_text_win_col': virtcol('.') - 1
        \ })

    call nvim_buf_set_extmark(l:bufnr, l:id_vt_fim, s:pos_y - 1, 0, {
        \ 'virt_lines': map(s:content[1:], {idx, val -> [[val, 'llama_hl_hint']]}),
        \ 'virt_text_win_col': virtcol('.')
        \ })

    " setup accept shortcuts
    inoremap <buffer> <Tab>   <C-O>:call llama#fim_accept(v:false)<CR>
    inoremap <buffer> <S-Tab> <C-O>:call llama#fim_accept(v:true)<CR>

    let s:hint_shown = v:true
endfunction

function! s:fim_on_exit(job_id, exit_code, event) dict
    if a:exit_code != 0
        echom "Job failed with exit code: " . a:exit_code
    endif

    let s:current_job = v:null
endfunction