// llama-kv-cache.h

#pragma once

#include "llama-batch.h"
#include "llama-graph.h"
#include "llama-kv-cells.h"
#include "llama-memory.h"

#include <cassert>
#include <map>
#include <unordered_map>
#include <vector>

struct llama_cparams;
struct llama_hparams;
struct llama_model;
struct llama_context;

//
// llama_kv_cache
//

class llama_kv_cache : public llama_memory_i {
public:
    static uint32_t get_padding(const llama_cparams & cparams);

    struct stream_copy_info {
        bool empty() const {
            assert(ssrc.size() == sdst.size());
            return ssrc.empty();
        }

        std::vector<uint32_t> ssrc;
        std::vector<uint32_t> sdst;
    };

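    // illustrative sketch (an assumption - the parallel ssrc/sdst arrays presumably describe copies
    //   of the KV data of stream ssrc[i] into stream sdst[i]):
    //
    //     stream_copy_info sc;
    //     sc.ssrc.push_back(0);
    //     sc.sdst.push_back(2); // request a copy of stream 0 into stream 2
    //
    //     assert(!sc.empty());
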
    // for each ubatch, create a slot_info that contains information about where the ubatch should be inserted in the
    //   KV cells. for example, cell indices for each token, such that: token[i] -> goes to cells[idxs[i]]
    struct slot_info {
        // data for ggml_set_rows
        using idx_vec_t = std::vector<uint32_t>;

        // number of streams: ns = s1 - s0 + 1
        uint32_t s0;
        uint32_t s1;

        std::vector<llama_seq_id> strm; // [ns]
        std::vector<idx_vec_t>    idxs; // [ns]

        uint32_t head() const {
            GGML_ASSERT(idxs.size() == 1);
            GGML_ASSERT(!idxs[0].empty());

            return idxs[0][0];
        }

        void resize(size_t n) {
            strm.resize(n);
            idxs.resize(n);
        }

        size_t size() const {
            GGML_ASSERT(idxs.size() == strm.size());
            GGML_ASSERT(!idxs.empty());

            return idxs[0].size();
        }

        size_t n_stream() const {
            return strm.size();
        }

        bool empty() const {
            return idxs.empty();
        }

        void clear() {
            idxs.clear();
        }
    };

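    // illustrative example (hypothetical values): a single-stream slot_info that places a 3-token
    //   ubatch into cells 5, 6 and 9, i.e. token[i] -> cells[idxs[0][i]]:
    //
    //     slot_info sinfo;
    //     sinfo.s0 = 0;
    //     sinfo.s1 = 0;                 // ns = s1 - s0 + 1 = 1 stream
    //     sinfo.strm = { 0 };
    //     sinfo.idxs = { { 5, 6, 9 } };
    //
    //     GGML_ASSERT(sinfo.size()     == 3); // tokens per stream
    //     GGML_ASSERT(sinfo.n_stream() == 1);
    //     GGML_ASSERT(sinfo.head()     == 5); // cell index of the first token
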
    using slot_info_vec_t = std::vector<slot_info>;

    llama_kv_cache(
            const llama_model & model,
                      ggml_type type_k,
                      ggml_type type_v,
                           bool v_trans,
                           bool offload,
                           bool unified,
                       uint32_t kv_size,
                       uint32_t n_seq_max,
                       uint32_t n_pad,
                       uint32_t n_swa,
                 llama_swa_type swa_type,
        const layer_filter_cb & filter,
         const layer_reuse_cb & reuse);

    ~llama_kv_cache() = default;

    //
    // llama_memory_i
    //

    llama_memory_context_ptr init_batch(
            llama_batch_allocr & balloc,
            uint32_t n_ubatch,
            bool embd_all) override;

    llama_memory_context_ptr init_full() override;

    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;

    bool get_can_shift() const override;

    void clear(bool data) override;

    bool seq_rm  (llama_seq_id seq_id,                              llama_pos p0, llama_pos p1) override;
    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
    void seq_keep(llama_seq_id seq_id) override;
    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;

    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
    llama_pos seq_pos_max(llama_seq_id seq_id) const override;

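    // illustrative usage sketch (an assumption, mirroring the public llama_memory_seq_* API:
    //   position ranges are [p0, p1) and a negative bound means "from the start" / "until the end"):
    //
    //     kv.seq_rm (1, 32, -1);    // drop the cells of seq 1 from position 32 onwards
    //     kv.seq_add(1, 0, 32, -8); // shift the remaining positions [0, 32) of seq 1 by -8
    //     kv.seq_keep(1);           // remove all sequences other than seq 1
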
    std::map<ggml_backend_buffer_type_t, size_t> memory_breakdown() const override;

    // state write/load

    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) const override;
    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) override;

    //
    // llama_kv_cache specific API
    //

    uint32_t get_size()     const;
    uint32_t get_n_stream() const;

    bool get_has_shift() const;

    //
    // graph_build API
    //

    uint32_t get_n_kv(const slot_info & sinfo) const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const;

    // store k_cur and v_cur in the cache based on the provided head location
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const;

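    // illustrative graph-build sketch (hypothetical variables - ctx, il, sinfo, k_cur, v_cur,
    //   k_idxs and v_idxs are assumed to come from the surrounding graph-build code):
    //
    //     const uint32_t n_kv = kv.get_n_kv(sinfo);
    //
    //     ggml_tensor * k = kv.get_k(ctx, il, n_kv, sinfo); // view over the cached keys
    //     ggml_tensor * v = kv.get_v(ctx, il, n_kv, sinfo); // view over the cached values
    //
    //     // write the current ubatch into the cells selected by sinfo
    //     ggml_tensor * k_stored = kv.cpy_k(ctx, k_cur, k_idxs, il, sinfo);
    //     ggml_tensor * v_stored = kv.cpy_v(ctx, v_cur, v_idxs, il, sinfo);
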
    //
    // preparation API
    //

    // find places for the provided ubatches in the cache and return the slot infos
    // returns an empty vector on failure
    slot_info_vec_t prepare(const std::vector<llama_ubatch> & ubatches);

    bool update(llama_context * lctx, bool do_shift, const stream_copy_info & sc_info);

    // find a slot of kv cells that can hold the ubatch
    // if cont == true, then the slot must be continuous
    // returns an empty slot_info on failure
    slot_info find_slot(const llama_ubatch & ubatch, bool cont) const;

    // emplace the ubatch context into the slot: [sinfo.idxs[0...ubatch.n_tokens - 1]]
    void apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch);

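    // illustrative flow (a hypothetical sketch of how the preparation API fits together):
    //
    //     slot_info_vec_t sinfos = kv.prepare(ubatches);
    //     if (sinfos.empty()) {
    //         // failure - the ubatches could not be placed in the cache
    //     }
    //
    //     for (size_t i = 0; i < ubatches.size(); ++i) {
    //         kv.apply_ubatch(sinfos[i], ubatches[i]); // record the ubatch in the KV cells
    //     }
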
    //
    // input API
    //

    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;

    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;
    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;

    void set_input_k_shift(ggml_tensor * dst) const;

    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;

private:
    const llama_model   & model;
    const llama_hparams & hparams;

    struct kv_layer {
        // layer index in the model
        // note: can be different from the layer index in the KV cache
        uint32_t il;

        ggml_tensor * k;
        ggml_tensor * v;

        std::vector<ggml_tensor *> k_stream;
        std::vector<ggml_tensor *> v_stream;
    };

    bool v_trans = true; // the value tensor is transposed

    const uint32_t n_seq_max = 1;
    const uint32_t n_stream  = 1;

    // required padding
    const uint32_t n_pad = 1;

    // SWA
    const uint32_t n_swa = 0;

    // env: LLAMA_KV_CACHE_DEBUG
    int debug = 0;

    // this is the SWA type of the cache - not to be confused with the model SWA type
    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;

    std::vector<ggml_context_ptr>        ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    // the current index from where we start searching for a free slot in the ring buffer of KV cells (see find_slot())
    // note: this is not part of the KV state and it's only used to speed-up the find_slot() method
    std::vector<uint32_t> v_heads;

    std::vector<llama_kv_cells> v_cells;

    // maps from a sequence id to a stream id
    std::vector<uint32_t> seq_to_stream;

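    // note (an assumption based on the `unified` constructor flag): with a unified cache all
    //   sequences presumably share stream 0, while a non-unified cache maps each sequence to
    //   its own stream, i.e. seq_to_stream[s] == s with n_stream == n_seq_max
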
    // pending stream copies that will be applied during the next update
    stream_copy_info sc_info;

    std::vector<kv_layer> layers;

    // model layer id -> KV cache layer id
    std::unordered_map<int32_t, int32_t> map_layer_ids;

    size_t total_size() const;

    size_t size_k_bytes() const;
    size_t size_v_bytes() const;

    bool is_masked_swa(llama_pos p0, llama_pos p1) const;

    ggml_tensor * build_rope_shift(
            const llama_cparams & cparams,
                   ggml_context * ctx,
                    ggml_tensor * cur,
                    ggml_tensor * shift,
                    ggml_tensor * factors,
                          float   freq_base,
                          float   freq_scale) const;

    ggml_cgraph * build_graph_shift(
            llm_graph_result * res,
               llama_context * lctx) const;

    struct cell_ranges_t {
        uint32_t strm;

        std::vector<std::pair<uint32_t, uint32_t>> data; // ranges, from inclusive, to exclusive
    };

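    // illustrative example (hypothetical values): two half-open cell ranges in stream 0,
    //   covering cells 0..3 and 8..9:
    //
    //     cell_ranges_t cr;
    //     cr.strm = 0;
    //     cr.data = { { 0, 4 }, { 8, 10 } };
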
    void state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id = -1) const;
    void state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const;

    bool state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
    bool state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count);
};

class llama_kv_cache_context : public llama_memory_context_i {
public:
    // some shorthands
    using slot_info_vec_t  = llama_kv_cache::slot_info_vec_t;
    using stream_copy_info = llama_kv_cache::stream_copy_info;

    // used for errors
    llama_kv_cache_context(llama_memory_status status);

    // used to create a full-cache context
    llama_kv_cache_context(
            llama_kv_cache * kv);

    // used to create an update context
    llama_kv_cache_context(
            llama_kv_cache * kv,
             llama_context * lctx,
                       bool do_shift,
           stream_copy_info sc_info);

    // used to create a batch processing context from a batch
    llama_kv_cache_context(
            llama_kv_cache * kv,
            slot_info_vec_t sinfos,
            std::vector<llama_ubatch> ubatches);

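    // illustrative consumption sketch (hypothetical - assumes the usual llama_memory_context_i
    //   iteration pattern of one apply() per ubatch, advancing with next()):
    //
    //     llama_kv_cache_context mctx(kv, std::move(sinfos), std::move(ubatches));
    //     if (mctx.get_status() == LLAMA_MEMORY_STATUS_SUCCESS) {
    //         do {
    //             mctx.apply(); // write the current ubatch into the cache
    //
    //             const llama_ubatch & ub = mctx.get_ubatch();
    //             // ... build and compute the graph for ub ...
    //         } while (mctx.next());
    //     }
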
    virtual ~llama_kv_cache_context();

    //
    // llama_memory_context_i
    //

    bool next()  override;
    bool apply() override;

    llama_memory_status  get_status() const override;
    const llama_ubatch & get_ubatch() const override;

    //
    // llama_kv_cache_context specific API
    //

    uint32_t get_n_kv() const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il) const;

    // store k_cur and v_cur in the cache based on the provided head location
    // note: the heads in k_cur and v_cur should be laid out contiguously in memory
    //   - k_cur  [n_embd_head_k, n_head_k, n_tokens]
    //   - k_idxs [n_tokens]
    //   - v_cur  [n_embd_head_v, n_head_v, n_tokens]
    //   - v_idxs [n_tokens] or [n_tokens*n_embd_v_gqa], depending on whether the V cache is transposed
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il) const;

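    // worked shape example (hypothetical dimensions): with n_embd_head_k = 128, n_head_k = 8 and a
    //   32-token ubatch:
    //
    //     k_cur  : [128, 8, 32] - keys of the current ubatch
    //     k_idxs : [32]         - one destination cell index per token
    //
    //   for a transposed V cache, v_idxs instead holds n_tokens*n_embd_v_gqa entries - presumably
    //   one destination index per value element rather than per token
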
    // create destination indices for each head of the current batch for where it would be written in the KV cache
    // the indices address the global KV cache (not per stream) - this is not relevant for the user of this API, but
    //   it helps to understand the implementation logic of cpy_k and cpy_v
    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;

    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;
    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;

    void set_input_k_shift(ggml_tensor * dst) const;

    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;

private:
    llama_memory_status status;

    llama_kv_cache * kv;
    llama_context  * lctx;

    //
    // update context
    //

    bool do_shift = false;

    stream_copy_info sc_info;

    //
    // batch processing context
    //

    // the index of the current ubatch to process
    size_t i_cur = 0;

    slot_info_vec_t sinfos;

    std::vector<llama_ubatch> ubatches;

    //
    // data needed for building the compute graph for the current ubatch:
    //

    // a heuristic to avoid attending the full cache if it is not yet utilized
    // as the cache gets filled, the benefit from this heuristic disappears
    int32_t n_kv;
};