#pragma once

#include "llama-batch.h"
#include "llama-graph.h"
#include "llama-kv-cells.h"
#include "llama-memory.h"

#include <functional>
#include <unordered_map>
#include <vector>

struct llama_cparams;
struct llama_hparams;
struct llama_model;
struct llama_context;

//
// llama_kv_cache_unified
//

class llama_kv_cache_unified : public llama_memory_i {
public:
    static uint32_t get_padding(const llama_cparams & cparams);

    // this callback is used to filter out layers that should not be included in the cache
    using layer_filter_cb = std::function<bool(int32_t il)>;

    using ubatch_heads = std::vector<uint32_t>;

    struct defrag_info {
        bool empty() const {
            return ids.empty();
        }

        // contains information about which cell moves where:
        //  - cell i moves to ids[i]
        //  - if ids[i] == i || ids[i] == ids.size(), then cell i is not moved
        std::vector<uint32_t> ids;
    };
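
    // illustrative example (not taken from the implementation): with ids.size() == 4,
    //
    //   ids = { 0, 4, 1, 2 }
    //
    // encodes a defrag plan where cell 0 stays in place (ids[0] == 0), cell 1 has nothing
    // to move (ids[1] == ids.size(), e.g. it is an empty hole), and cells 2 and 3 are
    // compacted down into cells 1 and 2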

    llama_kv_cache_unified(
            const llama_model & model,
            layer_filter_cb  && filter,
            ggml_type           type_k,
            ggml_type           type_v,
            bool                v_trans,
            bool                offload,
            uint32_t            kv_size,
            uint32_t            n_seq_max,
            uint32_t            n_pad,
            uint32_t            n_swa,
            llama_swa_type      swa_type);

    ~llama_kv_cache_unified() = default;

    //
    // llama_memory_i
    //

    llama_memory_state_ptr init_batch(
            const llama_batch & batch,
            uint32_t n_ubatch,
            bool embd_all) override;

    llama_memory_state_ptr init_full() override;

    llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override;

    bool get_can_shift() const override;

    void clear(bool data) override;

    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
    void seq_keep(llama_seq_id seq_id) override;
    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;

    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
    llama_pos seq_pos_max(llama_seq_id seq_id) const override;

    // state write/load

    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1)       override;

    //
    // llama_kv_cache_unified specific API
    //

    uint32_t get_size() const;

    bool get_has_shift() const;

    //
    // graph_build API
    //

    uint32_t get_n_kv() const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il, uint32_t n_kv) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il, uint32_t n_kv) const;

    // store k_cur and v_cur in the cache based on the provided head location
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il, uint32_t head_cur) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il, uint32_t head_cur) const;

    //
    // preparation API
    //

    // find places for the provided ubatches in the cache, returns the head locations
    // return empty vector on failure
    ubatch_heads prepare(const std::vector<llama_ubatch> & ubatches);
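
    // usage sketch (illustrative; assumes heads[i] pairs with ubatches[i]):
    //
    //   ubatch_heads heads = kv.prepare(ubatches);
    //   if (heads.empty()) {
    //       // could not find room for all ubatches - the caller has to split or fail the batch
    //   }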

    bool update(llama_context * lctx, bool do_shift, const defrag_info & dinfo);

    // return the cell position where we can insert the ubatch
    // return -1 on failure to find a contiguous slot of kv cells
    int32_t find_slot(const llama_ubatch & ubatch) const;

    // emplace the ubatch context into slot: [head_cur, head_cur + ubatch.n_tokens)
    void apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch);
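
    // typical flow (sketch only, simplified from how the state objects drive the cache):
    //
    //   const int32_t head_cur = kv.find_slot(ubatch);
    //   if (head_cur >= 0) {
    //       kv.apply_ubatch((uint32_t) head_cur, ubatch); // record the ubatch positions/sequences in the cells
    //
    //       // during graph build, write the new K/V data into the same slot:
    //       ggml_tensor * k_cpy = kv.cpy_k(ctx, k_cur, il, (uint32_t) head_cur);
    //       ggml_tensor * v_cpy = kv.cpy_v(ctx, v_cur, il, (uint32_t) head_cur);
    //   }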

    //
    // set_input API
    //

    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_k_shift   (ggml_tensor * dst) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;

private:
    const llama_model   & model;
    const llama_hparams & hparams;

    struct kv_layer {
        // layer index in the model
        // note: can be different from the layer index in the KV cache
        uint32_t il;

        ggml_tensor * k;
        ggml_tensor * v;
    };

    bool v_trans = true; // the value tensor is transposed

    // the current index from where we start searching for a free slot in the ring buffer of KV cells (see find_slot())
    // note: this is not part of the KV state and it's only used to speed-up the find_slot() method
    uint32_t head = 0;

    const uint32_t n_seq_max = 1;

    // required padding
    const uint32_t n_pad = 1;

    // SWA
    const uint32_t n_swa = 0;

    int debug = 0;

    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;

    std::vector<ggml_context_ptr>        ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    llama_kv_cells_unified cells;

    std::vector<kv_layer> layers;

    // model layer id -> KV cache layer id
    std::unordered_map<int32_t, int32_t> map_layer_ids;

    // return non-empty vector if cells have been moved
    defrag_info defrag_prepare(int32_t n_max_nodes) const;

    size_t total_size() const;

    size_t size_k_bytes() const;
    size_t size_v_bytes() const;

    bool is_masked_swa(llama_pos p0, llama_pos p1) const;

    ggml_tensor * build_rope_shift(
            const llama_cparams & cparams,
            ggml_context * ctx,
            ggml_tensor  * cur,
            ggml_tensor  * shift,
            ggml_tensor  * factors,
            float freq_base,
            float freq_scale) const;

    llm_graph_result_ptr build_graph_shift(
            const llama_cparams & cparams,
            ggml_context * ctx,
            ggml_cgraph  * gf) const;

    llm_graph_result_ptr build_graph_defrag(
            const llama_cparams & cparams,
            ggml_context * ctx,
            ggml_cgraph  * gf,
            const defrag_info & dinfo) const;

    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;

    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
};
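
// construction sketch (illustrative values only, not the canonical call site in llama.cpp):
//
//   llama_kv_cache_unified kv(
//       model,
//       [](int32_t /*il*/) { return true; }, // keep every layer in the cache
//       GGML_TYPE_F16, GGML_TYPE_F16,        // K/V storage types
//       /*v_trans   =*/ true,
//       /*offload   =*/ true,
//       /*kv_size   =*/ 4096,
//       /*n_seq_max =*/ 1,
//       /*n_pad     =*/ 32,                  // e.g. the value returned by get_padding(cparams)
//       /*n_swa     =*/ 0,
//       LLAMA_SWA_TYPE_NONE);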

class llama_kv_cache_unified_state : public llama_memory_state_i {
public:
    // some shorthands
    using ubatch_heads = llama_kv_cache_unified::ubatch_heads;
    using defrag_info  = llama_kv_cache_unified::defrag_info;

    // used for errors
    llama_kv_cache_unified_state(llama_memory_status status);

    // used to create a full-cache state
    llama_kv_cache_unified_state(
            llama_kv_cache_unified * kv);

    // used to create an update state
    llama_kv_cache_unified_state(
            llama_kv_cache_unified * kv,
            llama_context * lctx,
            bool do_shift,
            defrag_info dinfo);

    // used to create a decode state from a batch
    llama_kv_cache_unified_state(
            llama_kv_cache_unified * kv,
            llama_sbatch sbatch,
            ubatch_heads heads,
            std::vector<llama_ubatch> ubatches);

    virtual ~llama_kv_cache_unified_state();

    //
    // llama_memory_state_i
    //

    bool next()  override;
    bool apply() override;

    std::vector<int64_t> & out_ids() override;

    llama_memory_status  get_status() const override;
    const llama_ubatch & get_ubatch() const override;

    //
    // llama_kv_cache_unified_state specific API
    //

    uint32_t get_n_kv() const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il) const;

    // store k_cur and v_cur in the cache based on the provided head location
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const;

    void set_input_k_shift(ggml_tensor * dst) const;

    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;

private:
    llama_memory_status status;

    llama_kv_cache_unified * kv;
    llama_context          * lctx;

    //
    // update state
    //

    bool do_shift = false;

    defrag_info dinfo;

    //
    // batch processing state
    //

    llama_sbatch sbatch;

    // the index of the next ubatch to process
    size_t i_next = 0;

    ubatch_heads heads;

    std::vector<llama_ubatch> ubatches;

    //
    // data needed for building the compute graph for the current ubatch:
    //

    // a heuristic, to avoid attending the full cache if it is not yet utilized
    // as the cache gets filled, the benefit from this heuristic disappears
    int32_t n_kv;
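
    // illustrative example (assumption about the exact rounding, the principle follows the
    // comment above): with a cache of 4096 cells of which only ~300 are in use, n_kv can be
    // reduced to the padded used size (e.g. 320 with a padding of 32), so the attention for
    // the current ubatch only spans that prefix of the cache instead of all 4096 cells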

    // the beginning of the current slot in which the ubatch will be inserted
    int32_t head;
};
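
// consumption sketch (illustrative; assumes the success value of llama_memory_status is
// named LLAMA_MEMORY_STATUS_SUCCESS, as declared in llama-memory.h):
//
//   llama_memory_state_ptr st = kv.init_batch(batch, n_ubatch, /*embd_all=*/false);
//   if (st && st->get_status() == LLAMA_MEMORY_STATUS_SUCCESS) {
//       do {
//           st->apply();                              // write the current ubatch into the cache
//           const llama_ubatch & ub = st->get_ubatch();
//           // ... build and compute the graph for ub ...
//       } while (st->next());                         // advance to the next scheduled ubatch
//   }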