llama-memory-hybrid.cpp

#include "llama-memory-hybrid.h"

#include "llama-impl.h"
#include "llama-model.h"
#include "llama-context.h"

#include <algorithm> // std::min, std::max
#include <cassert>
#include <map>

//
// llama_memory_hybrid
//
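
// hybrid memory for models that mix attention and recurrent (e.g. SSM) layers:
// it owns a regular KV cache for the attention layers and a recurrent state cache
// for the recurrent layers, and forwards every memory operation to both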
llama_memory_hybrid::llama_memory_hybrid(
        const llama_model & model,
        /* attn */
        ggml_type type_k,
        ggml_type type_v,
        bool v_trans,
        uint32_t kv_size,
        uint32_t n_pad,
        uint32_t n_swa,
        llama_swa_type swa_type,
        /* recurrent */
        ggml_type type_r,
        ggml_type type_s,
        uint32_t rs_size,
        /* common */
        uint32_t n_seq_max,
        bool offload,
        bool unified,
        /* layer filters */
        const layer_filter_cb & filter_attn,
        const layer_filter_cb & filter_recr) :
    hparams(model.hparams),
    mem_attn(new llama_kv_cache(
        model,
        type_k,
        type_v,
        v_trans,
        offload,
        unified,
        kv_size,
        n_seq_max,
        n_pad,
        n_swa,
        swa_type,
        filter_attn == nullptr ?
            [&](int32_t il) { return !hparams.is_recurrent(il); }
            : filter_attn,
        nullptr
    )),
    mem_recr(new llama_memory_recurrent(
        model,
        type_r,
        type_s,
        offload,
        rs_size,
        n_seq_max,
        filter_recr == nullptr ?
            [&](int32_t il) { return hparams.is_recurrent(il); }
            : filter_recr
    )) {}
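
// split the input batch into ubatches and prepare both caches for them;
// any failure is reported as LLAMA_MEMORY_STATUS_FAILED_PREPARE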
llama_memory_context_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
    do {
        balloc.split_reset();

        // follow the recurrent pattern for creating the ubatch splits
        std::vector<llama_ubatch> ubatches;

        while (true) {
            llama_ubatch ubatch;

            if (embd_all) {
                // if all tokens are output, split by sequence
                ubatch = balloc.split_seq(n_ubatch);
            } else {
                ubatch = balloc.split_equal(n_ubatch, false);
            }

            if (ubatch.n_tokens == 0) {
                break;
            }

            ubatches.push_back(std::move(ubatch)); // NOLINT
        }

        if (balloc.get_n_used() < balloc.get_n_tokens()) {
            // failed to find a suitable split
            break;
        }

        // prepare the recurrent batches first
        if (!mem_recr->prepare(ubatches)) {
            // TODO: will the recurrent cache be in an undefined context at this point?
            LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__);
            return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
        }

        // prepare the attention cache
        auto heads_attn = mem_attn->prepare(ubatches);
        if (heads_attn.empty()) {
            LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__);
            return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
        }

        return std::make_unique<llama_memory_hybrid_context>(
            this, std::move(heads_attn), std::move(ubatches));
    } while (false);

    return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}
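
// full-cache and update (e.g. shift/defrag) contexts are simply delegated to the child caches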
llama_memory_context_ptr llama_memory_hybrid::init_full() {
    return std::make_unique<llama_memory_hybrid_context>(this);
}

llama_memory_context_ptr llama_memory_hybrid::init_update(llama_context * lctx, bool optimize) {
    return std::make_unique<llama_memory_hybrid_context>(this, lctx, optimize);
}

bool llama_memory_hybrid::get_can_shift() const {
    // shifting is trivially supported for the recurrent cache (it keeps one state per
    // sequence, not per position), so the result only depends on the attention cache
    return mem_attn->get_can_shift();
}

void llama_memory_hybrid::clear(bool data) {
    mem_attn->clear(data);
    mem_recr->clear(data);
}

bool llama_memory_hybrid::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    // try removing from the recurrent cache first since it may fail;
    // if it does fail, the cache will not have been mutated
    if (!mem_recr->seq_rm(seq_id, p0, p1)) {
        return false;
    }
    return mem_attn->seq_rm(seq_id, p0, p1);
}
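
// the remaining sequence operations have no failure path and are forwarded to both caches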
void llama_memory_hybrid::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    mem_attn->seq_cp(seq_id_src, seq_id_dst, p0, p1);
    mem_recr->seq_cp(seq_id_src, seq_id_dst, p0, p1);
}

void llama_memory_hybrid::seq_keep(llama_seq_id seq_id) {
    mem_attn->seq_keep(seq_id);
    mem_recr->seq_keep(seq_id);
}

void llama_memory_hybrid::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
    mem_attn->seq_add(seq_id, p0, p1, shift);
    mem_recr->seq_add(seq_id, p0, p1, shift);
}

void llama_memory_hybrid::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    mem_attn->seq_div(seq_id, p0, p1, d);
    mem_recr->seq_div(seq_id, p0, p1, d);
}

llama_pos llama_memory_hybrid::seq_pos_min(llama_seq_id seq_id) const {
    // the min of the total cache is the max of the two caches' min values
    return std::max(mem_attn->seq_pos_min(seq_id), mem_recr->seq_pos_min(seq_id));
}

llama_pos llama_memory_hybrid::seq_pos_max(llama_seq_id seq_id) const {
    // the max of the total cache is the min of the two caches' max values
    return std::min(mem_attn->seq_pos_max(seq_id), mem_recr->seq_pos_max(seq_id));
}
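
// report buffer usage per backend buffer type, summed over both caches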
std::map<ggml_backend_buffer_type_t, size_t> llama_memory_hybrid::memory_breakdown() const {
    std::map<ggml_backend_buffer_type_t, size_t> mb = mem_attn->memory_breakdown();
    for (const auto & buft_size : mem_recr->memory_breakdown()) {
        mb[buft_size.first] += buft_size.second;
    }
    return mb;
}
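
// sequence state is serialized as the attention cache followed by the recurrent cache;
// state_read relies on the same order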
void llama_memory_hybrid::state_write(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) const {
    GGML_UNUSED(flags);

    mem_attn->state_write(io, seq_id);
    mem_recr->state_write(io, seq_id);
}

void llama_memory_hybrid::state_read(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
    GGML_UNUSED(flags);

    mem_attn->state_read(io, seq_id);
    mem_recr->state_read(io, seq_id);
}

llama_kv_cache * llama_memory_hybrid::get_mem_attn() const {
    return mem_attn.get();
}

llama_memory_recurrent * llama_memory_hybrid::get_mem_recr() const {
    return mem_recr.get();
}
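
//
// llama_memory_hybrid_context
//

// per-batch view over the hybrid memory: it pairs one child context per cache,
// combines their statuses and advances both in lockstep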
llama_memory_hybrid_context::llama_memory_hybrid_context(llama_memory_status status) : status(status) {}

llama_memory_hybrid_context::llama_memory_hybrid_context(llama_memory_hybrid * mem) :
    ctx_attn(mem->get_mem_attn()->init_full()),
    ctx_recr(mem->get_mem_recr()->init_full()),
    status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) {
}

llama_memory_hybrid_context::llama_memory_hybrid_context(
        llama_memory_hybrid * mem,
        llama_context * lctx,
        bool optimize) :
    ctx_attn(mem->get_mem_attn()->init_update(lctx, optimize)),
    ctx_recr(mem->get_mem_recr()->init_update(lctx, optimize)),
    status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) {
}

llama_memory_hybrid_context::llama_memory_hybrid_context(
        llama_memory_hybrid * mem,
        slot_info_vec_t sinfos_attn,
        std::vector<llama_ubatch> ubatches) :
    ubatches(std::move(ubatches)),
    // note: here we copy the ubatches. not sure if this is ideal
    ctx_attn(new llama_kv_cache_context(mem->get_mem_attn(), std::move(sinfos_attn), this->ubatches)),
    ctx_recr(new llama_memory_recurrent_context(mem->get_mem_recr(), this->ubatches)),
    status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) {
}
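
// advance both child contexts to the next ubatch; returns false once all ubatches are consumed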
bool llama_memory_hybrid_context::next() {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    ctx_attn->next();
    ctx_recr->next();

    if (++i_next >= ubatches.size()) {
        return false;
    }

    return true;
}
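
// apply the pending updates in both child contexts;
// the bitwise & (instead of &&) ensures both apply() calls run even if the first one fails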
bool llama_memory_hybrid_context::apply() {
    assert(!llama_memory_status_is_fail(status));

    bool res = true;

    res = res & ctx_attn->apply();
    res = res & ctx_recr->apply();

    return res;
}

llama_memory_status llama_memory_hybrid_context::get_status() const {
    return status;
}

const llama_ubatch & llama_memory_hybrid_context::get_ubatch() const {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
    return ubatches[i_next];
}
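
// the downcasts assume the attention cache always produces a llama_kv_cache_context
// and the recurrent cache a llama_memory_recurrent_context, as set up in the constructors above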
const llama_kv_cache_context * llama_memory_hybrid_context::get_attn() const {
    return static_cast<const llama_kv_cache_context *>(ctx_attn.get());
}

const llama_memory_recurrent_context * llama_memory_hybrid_context::get_recr() const {
    return static_cast<const llama_memory_recurrent_context *>(ctx_recr.get());
}