llama-kv-cache-unified.h

#pragma once

#include "llama-batch.h"
#include "llama-graph.h"
#include "llama-kv-cells.h"
#include "llama-memory.h"

#include <functional>
#include <unordered_map>
#include <vector>

struct llama_cparams;
struct llama_hparams;
struct llama_model;
struct llama_context;

//
// llama_kv_cache_unified
//

class llama_kv_cache_unified : public llama_memory_i {
public:
    static uint32_t get_padding(const llama_cparams & cparams);

    // this callback is used to filter out layers that should not be included in the cache
    using layer_filter_cb = std::function<bool(int32_t il)>;

    struct defrag_info {
        bool empty() const {
            return ids.empty();
        }

        // contains information about which cell moves where:
        //  - cell i moves to ids[i]
        //  - if ids[i] == i || ids[i] == ids.size(), then cell i is not moved
        std::vector<uint32_t> ids;
    };
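
    // illustrative example (hypothetical values, not part of the API): with
    // ids.size() == 4 and ids = { 0, 1, 4, 2 }, cells 0 and 1 stay in place
    // (ids[i] == i), cell 2 is not moved because ids[2] == ids.size() (e.g. it
    // is empty), and cell 3 is moved into cell 2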

    // for each ubatch, create a slot_info that contains information about where the ubatch should be inserted in the
    // KV cells. for example, cell indices for each token, such that: token[i] -> goes to cells[idxs[i]]
    struct slot_info {
        // data for ggml_set_rows
        using idx_vec_t = std::vector<uint32_t>;

        idx_vec_t idxs;

        uint32_t head() const {
            return idxs.at(0);
        }

        bool empty() const {
            return idxs.empty();
        }

        void clear() {
            idxs.clear();
        }

        // TODO: implement
        //std::vector<idx_vec_t> seq_idxs;
    };
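
    // illustrative example (hypothetical values): for a ubatch of 3 tokens,
    // idxs = { 7, 8, 12 } stores token 0 in cell 7, token 1 in cell 8 and
    // token 2 in cell 12; head() returns 7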

    using slot_info_vec_t = std::vector<slot_info>;

    llama_kv_cache_unified(
            const llama_model & model,
            layer_filter_cb &&  filter,
            ggml_type           type_k,
            ggml_type           type_v,
            bool                v_trans,
            bool                offload,
            uint32_t            kv_size,
            uint32_t            n_seq_max,
            uint32_t            n_pad,
            uint32_t            n_swa,
            llama_swa_type      swa_type);
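
    // construction sketch (illustrative only - the values below are hypothetical,
    // not defaults of this class):
    //
    //     llama_kv_cache_unified kv(
    //         model,
    //         nullptr,               // layer filter: cache all layers
    //         GGML_TYPE_F16,         // type_k
    //         GGML_TYPE_F16,         // type_v
    //         false,                 // v_trans
    //         true,                  // offload
    //         4096,                  // kv_size
    //         1,                     // n_seq_max
    //         32,                    // n_pad
    //         0,                     // n_swa
    //         LLAMA_SWA_TYPE_NONE);  // swa_type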

    ~llama_kv_cache_unified() = default;

    //
    // llama_memory_i
    //

    llama_memory_context_ptr init_batch(
            llama_batch_allocr & balloc,
            uint32_t n_ubatch,
            bool embd_all) override;

    llama_memory_context_ptr init_full() override;

    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;

    bool get_can_shift() const override;

    void clear(bool data) override;

    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
    void seq_keep(llama_seq_id seq_id) override;
    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;

    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
    llama_pos seq_pos_max(llama_seq_id seq_id) const override;

    // state write/load

    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1) override;

    //
    // llama_kv_cache_unified specific API
    //

    uint32_t get_size() const;

    bool get_has_shift() const;

    //
    // graph_build API
    //

    uint32_t get_n_kv() const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il, uint32_t n_kv) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il, uint32_t n_kv) const;

    // store k_cur and v_cur in the cache based on the provided head location
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const;
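
    // graph-build sketch (illustrative only; ctx, il, ubatch, k_cur and sinfo are
    // assumed to be provided by the caller; build_input_k_idxs is declared below):
    //
    //     const uint32_t n_kv = kv.get_n_kv();
    //
    //     ggml_tensor * k = kv.get_k(ctx, il, n_kv); // view over the cached keys for layer il
    //     ggml_tensor * v = kv.get_v(ctx, il, n_kv); // view over the cached values for layer il
    //
    //     ggml_tensor * k_idxs = kv.build_input_k_idxs(ctx, ubatch);
    //     ggml_tensor * k_cpy  = kv.cpy_k(ctx, k_cur, k_idxs, il, sinfo); // writes k_cur into the cache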

    //
    // preparation API
    //

    // find places for the provided ubatches in the cache and return the slot infos
    // returns an empty vector on failure
    slot_info_vec_t prepare(const std::vector<llama_ubatch> & ubatches);

    bool update(llama_context * lctx, bool do_shift, const defrag_info & dinfo);

    // find a slot of kv cells that can hold the ubatch
    // if cont == true, then the slot must be contiguous
    // returns an empty slot_info on failure
    slot_info find_slot(const llama_ubatch & ubatch, bool cont) const;

    // emplace the ubatch context into slot: [sinfo.idxs[0...ubatch.n_tokens - 1]]
    void apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch);
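
    // batch-placement sketch (illustrative only; `ubatches` is assumed to come from
    // the batch splitting logic):
    //
    //     slot_info_vec_t sinfos = kv.prepare(ubatches);
    //     if (sinfos.empty()) {
    //         // not enough free cells - the batch cannot be processed right now
    //     }
    //
    //     // later, for each ubatch i: reserve the chosen cells and record positions/sequences
    //     kv.apply_ubatch(sinfos[i], ubatches[i]);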

    //
    // input API
    //

    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;

    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;
    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;

    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_k_shift   (ggml_tensor * dst) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;

private:
    const llama_model & model;
    const llama_hparams & hparams;

    struct kv_layer {
        // layer index in the model
        // note: can be different from the layer index in the KV cache
        uint32_t il;

        ggml_tensor * k;
        ggml_tensor * v;
    };

    bool v_trans = true; // the value tensor is transposed

    // the current index from where we start searching for a free slot in the ring buffer of KV cells (see find_slot())
    // note: this is not part of the KV state and it is only used to speed up the find_slot() method
    uint32_t head = 0;

    const uint32_t n_seq_max = 1;

    // required padding
    const uint32_t n_pad = 1;

    // SWA
    const uint32_t n_swa = 0;

    // env: LLAMA_KV_CACHE_DEBUG
    int debug = 0;

    // env: LLAMA_SET_ROWS (temporary)
    // ref: https://github.com/ggml-org/llama.cpp/pull/14285
    bool supports_set_rows = false;

    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;

    std::vector<ggml_context_ptr>        ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    llama_kv_cells_unified cells;

    std::vector<kv_layer> layers;

    // model layer id -> KV cache layer id
    std::unordered_map<int32_t, int32_t> map_layer_ids;

    // return non-empty vector if cells have been moved
    defrag_info defrag_prepare(int32_t n_max_nodes) const;

    size_t total_size() const;

    size_t size_k_bytes() const;
    size_t size_v_bytes() const;

    bool is_masked_swa(llama_pos p0, llama_pos p1) const;

    ggml_tensor * build_rope_shift(
            const llama_cparams & cparams,
            ggml_context * ctx,
            ggml_tensor * cur,
            ggml_tensor * shift,
            ggml_tensor * factors,
            float freq_base,
            float freq_scale) const;

    llm_graph_result_ptr build_graph_shift(
            const llama_cparams & cparams,
            ggml_context * ctx,
            ggml_cgraph * gf) const;

    llm_graph_result_ptr build_graph_defrag(
            const llama_cparams & cparams,
            ggml_context * ctx,
            ggml_cgraph * gf,
            const defrag_info & dinfo) const;

    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;

    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
};

class llama_kv_cache_unified_context : public llama_memory_context_i {
public:
    // some shorthands
    using slot_info_vec_t = llama_kv_cache_unified::slot_info_vec_t;
    using defrag_info     = llama_kv_cache_unified::defrag_info;

    // used for errors
    llama_kv_cache_unified_context(llama_memory_status status);

    // used to create a full-cache context
    llama_kv_cache_unified_context(
            llama_kv_cache_unified * kv);

    // used to create an update context
    llama_kv_cache_unified_context(
            llama_kv_cache_unified * kv,
            llama_context * lctx,
            bool do_shift,
            defrag_info dinfo);

    // used to create a batch processing context from a batch
    llama_kv_cache_unified_context(
            llama_kv_cache_unified * kv,
            slot_info_vec_t sinfos,
            std::vector<llama_ubatch> ubatches);

    virtual ~llama_kv_cache_unified_context();

    //
    // llama_memory_context_i
    //

    bool next()  override;
    bool apply() override;

    llama_memory_status get_status() const override;
    const llama_ubatch & get_ubatch() const override;

    //
    // llama_kv_cache_unified_context specific API
    //

    uint32_t get_n_kv() const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il) const;

    // store k_cur and v_cur in the cache based on the provided head location
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il) const;

    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;

    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;
    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;

    void set_input_k_shift   (ggml_tensor * dst) const;
    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;
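
    // usage sketch for a batch-processing context (illustrative only; error
    // handling omitted; the exact call order is up to the caller and the status
    // values are defined in llama-memory.h):
    //
    //     if (mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
    //         // initialization failed - e.g. no slot could be found for the batch
    //     }
    //
    //     do {
    //         const llama_ubatch & ubatch = mctx->get_ubatch();
    //
    //         mctx->apply(); // reserve the KV cells for the current ubatch
    //
    //         // ... build and evaluate the compute graph for ubatch ...
    //     } while (mctx->next()); // advance to the next ubatch; returns false when done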

private:
    llama_memory_status status;

    llama_kv_cache_unified * kv;
    llama_context * lctx;

    //
    // update context
    //

    bool do_shift = false;

    defrag_info dinfo;

    //
    // batch processing context
    //

    // the index of the current ubatch to process
    size_t i_cur = 0;

    slot_info_vec_t sinfos;

    std::vector<llama_ubatch> ubatches;

    //
    // data needed for building the compute graph for the current ubatch:
    //

    // a heuristic to avoid attending the full cache if it is not yet utilized
    // as the cache gets filled, the benefit from this heuristic disappears
    int32_t n_kv;
};