// llama-kv-cache.cpp
  1. #include "llama-kv-cache.h"
  2. #include "llama-impl.h"
  3. #include "llama-batch.h"
  4. #include "llama-cparams.h"
  5. #include "llama-model.h"
  6. #include "llama-context.h"
  7. #include <algorithm>
  8. #include <cassert>
  9. #include <cmath>
  10. #include <limits>
  11. #include <map>
  12. #include <stdexcept>
//
// llama_kv_cache_unified
//
  16. uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) {
  17. // the FA kernels require padding to avoid extra runtime boundary checks
  18. return cparams.flash_attn ? 256u : 32u;
  19. }
// Unified (contiguous) KV cache constructor.
//
// Creates one K and one V tensor per layer that passes `filter`, grouping the
// tensors into one ggml context per backend buffer type (CPU, or the layer's
// device when `offload` is set). Buffers are zero-cleared after allocation.
// Throws std::runtime_error when a context or a backend buffer cannot be created.
llama_kv_cache_unified::llama_kv_cache_unified(
        const llama_model & model,
          layer_filter_cb && filter,
                ggml_type    type_k,
                ggml_type    type_v,
                     bool    v_trans,
                     bool    offload,
                 uint32_t    kv_size,
                 uint32_t    n_seq_max,
                 uint32_t    n_pad,
                 uint32_t    n_swa,
           llama_swa_type    swa_type) :
    model(model), hparams(model.hparams), v_trans(v_trans),
    n_seq_max(n_seq_max), n_pad(n_pad), n_swa(n_swa), swa_type(swa_type) {

    // the slot-search/padding logic assumes the cache size is a multiple of n_pad
    GGML_ASSERT(kv_size % n_pad == 0);

    // create a context for each buffer type
    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
        auto it = ctx_map.find(buft);
        if (it == ctx_map.end()) {
            // metadata only (no_alloc): at most 2 tensors (K and V) per layer
            ggml_init_params params = {
                /*.mem_size   =*/ size_t(2u*hparams.n_layer*ggml_tensor_overhead()),
                /*.mem_buffer =*/ NULL,
                /*.no_alloc   =*/ true,
            };

            ggml_context * ctx = ggml_init(params);
            if (!ctx) {
                return nullptr;
            }

            ctx_map[buft] = ctx;
            ctxs.emplace_back(ctx);

            return ctx;
        }

        return it->second;
    };

    head = 0;

    cells.resize(kv_size);

    for (uint32_t il = 0; il < hparams.n_layer; il++) {
        if (filter && !filter(il)) {
            LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, il);
            continue;
        }

        // per-layer K/V row widths
        // NOTE(review): the *_s() terms look like extra state rows (e.g. for
        // recurrent layers) - confirm against llama-hparams before relying on this
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

        const char * dev_name = "CPU";

        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();

        if (offload) {
            auto * dev = model.dev_layer(il);
            buft = ggml_backend_dev_buffer_type(dev);

            dev_name = ggml_backend_dev_name(dev);
        }

        LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, il, dev_name);

        ggml_context * ctx = ctx_for_buft(buft);
        if (!ctx) {
            throw std::runtime_error("failed to create ggml context for kv cache");
        }

        ggml_tensor * k;
        ggml_tensor * v;

        k = ggml_new_tensor_2d(ctx, type_k, n_embd_k_gqa, kv_size);
        v = ggml_new_tensor_2d(ctx, type_v, n_embd_v_gqa, kv_size);

        ggml_format_name(k, "cache_k_l%d", il);
        ggml_format_name(v, "cache_v_l%d", il);

        // dense remap: model layer id -> index into `layers` (filtered layers absent)
        map_layer_ids[il] = layers.size();
        layers.push_back({ il, k, v });
    }

    // allocate tensors and initialize the buffers to avoid NaNs in the padding
    for (auto it : ctx_map) {
        auto * buft = it.first;
        auto * ctx  = it.second;

        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
        if (!buf) {
            throw std::runtime_error("failed to allocate buffer for kv cache");
        }

        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);

        ggml_backend_buffer_clear(buf, 0);
        bufs.emplace_back(buf);
    }

    // report total K/V memory usage
    {
        const size_t memory_size_k = size_k_bytes();
        const size_t memory_size_v = size_v_bytes();

        LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u seqs), K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
                (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), kv_size, (int) layers.size(), n_seq_max,
                ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
                ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
    }
}
  106. void llama_kv_cache_unified::clear() {
  107. cells.reset();
  108. head = 0;
  109. for (auto & buf : bufs) {
  110. ggml_backend_buffer_clear(buf.get(), 0);
  111. }
  112. }
  113. bool llama_kv_cache_unified::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
  114. uint32_t new_head = cells.size();
  115. if (p0 < 0) {
  116. p0 = 0;
  117. }
  118. if (p1 < 0) {
  119. p1 = std::numeric_limits<llama_pos>::max();
  120. }
  121. for (uint32_t i = 0; i < cells.size(); ++i) {
  122. if (!cells.pos_in(i, p0, p1)) {
  123. continue;
  124. }
  125. if (cells.seq_has(i, seq_id) && cells.seq_rm(i, seq_id)) {
  126. if (new_head == cells.size()) {
  127. new_head = i;
  128. }
  129. }
  130. }
  131. // If we freed up a slot, set head to it so searching can start there.
  132. if (new_head != cells.size() && new_head < head) {
  133. head = new_head;
  134. }
  135. return true;
  136. }
  137. void llama_kv_cache_unified::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
  138. if (seq_id_src == seq_id_dst) {
  139. return;
  140. }
  141. if (p0 < 0) {
  142. p0 = 0;
  143. }
  144. if (p1 < 0) {
  145. p1 = std::numeric_limits<llama_pos>::max();
  146. }
  147. for (uint32_t i = 0; i < cells.size(); ++i) {
  148. if (!cells.pos_in(i, p0, p1)) {
  149. continue;
  150. }
  151. if (cells.seq_has(i, seq_id_src)) {
  152. cells.seq_add(i, seq_id_dst);
  153. }
  154. }
  155. }
  156. void llama_kv_cache_unified::seq_keep(llama_seq_id seq_id) {
  157. uint32_t new_head = cells.size();
  158. for (uint32_t i = 0; i < cells.size(); ++i) {
  159. if (cells.seq_keep(i, seq_id)) {
  160. if (new_head == cells.size()) {
  161. new_head = i;
  162. }
  163. }
  164. }
  165. // If we freed up a slot, set head to it so searching can start there.
  166. if (new_head != cells.size() && new_head < head) {
  167. head = new_head;
  168. }
  169. }
  170. void llama_kv_cache_unified::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
  171. if (shift == 0) {
  172. return;
  173. }
  174. uint32_t new_head = cells.size();
  175. if (p0 < 0) {
  176. p0 = 0;
  177. }
  178. if (p1 < 0) {
  179. p1 = std::numeric_limits<llama_pos>::max();
  180. }
  181. // If there is no range then return early to avoid looping over all cells.
  182. if (p0 == p1) {
  183. return;
  184. }
  185. for (uint32_t i = 0; i < cells.size(); ++i) {
  186. if (!cells.pos_in(i, p0, p1)) {
  187. continue;
  188. }
  189. if (cells.seq_has(i, seq_id)) {
  190. if (cells.pos_add(i, shift)) {
  191. if (new_head == cells.size()) {
  192. new_head = i;
  193. }
  194. }
  195. }
  196. }
  197. // If we freed up a slot, set head to it so searching can start there.
  198. // Otherwise we just start the next search from the beginning.
  199. head = new_head != cells.size() ? new_head : 0;
  200. }
  201. void llama_kv_cache_unified::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
  202. if (d == 1) {
  203. return;
  204. }
  205. if (p0 < 0) {
  206. p0 = 0;
  207. }
  208. if (p1 < 0) {
  209. p1 = std::numeric_limits<llama_pos>::max();
  210. }
  211. // If there is no range then return early to avoid looping over the cache.
  212. if (p0 == p1) {
  213. return;
  214. }
  215. for (uint32_t i = 0; i < cells.size(); ++i) {
  216. if (!cells.pos_in(i, p0, p1)) {
  217. continue;
  218. }
  219. if (cells.seq_has(i, seq_id)) {
  220. cells.pos_div(i, d);
  221. }
  222. }
  223. }
  224. llama_pos llama_kv_cache_unified::seq_pos_min(llama_seq_id seq_id) const {
  225. return cells.seq_pos_min(seq_id);
  226. }
  227. llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const {
  228. return cells.seq_pos_max(seq_id);
  229. }
  230. void llama_kv_cache_unified::restore() {
  231. for (auto & state : recovery.states) {
  232. cells.set(state.i, state.cells);
  233. }
  234. recovery.clear();
  235. }
  236. void llama_kv_cache_unified::commit() {
  237. if (recovery.states.empty()) {
  238. LLAMA_LOG_WARN("%s: the recovery information upon a commit was empty - might indicate a bug (ref: %s)\n",
  239. __func__, "https://github.com/ggml-org/llama.cpp/pull/13194");
  240. return;
  241. }
  242. recovery.clear();
  243. }
// Apply any pending cache transformations (K-shift, defragmentation) by building
// and computing the corresponding graphs on the context's scheduler.
// Returns true when a graph was computed, i.e. the caller needs to re-reserve.
bool llama_kv_cache_unified::update(llama_context & lctx) {
    bool need_reserve = false;

    auto * sched = lctx.get_sched();

    if (cells.get_has_shift()) {
        if (!get_can_shift()) {
            GGML_ABORT("The current KV cache / model configuration does not support K-shift");
        }

        LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);

        // apply K-shift if needed
        // NOTE(review): for LLAMA_ROPE_TYPE_NONE no graph is built - the shift
        // metadata is still reset below without touching the tensors
        if (hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
            ggml_backend_sched_reset(sched);

            auto * gf = lctx.graph_init();

            auto res = build_graph_shift(lctx.get_cparams(), lctx.get_ctx_compute(), gf);

            ggml_backend_sched_alloc_graph(sched, gf);

            res->set_inputs(nullptr);

            lctx.graph_compute(gf, false);

            need_reserve = true;
        }

        cells.reset_shift();
    }

    if (do_defrag) {
        LLAMA_LOG_DEBUG("%s: defragmenting KV cache\n", __func__);

        // only build the defrag graph when defrag_prepare() reports work to do
        if (defrag_prepare(lctx.graph_max_nodes())) {
            ggml_backend_sched_reset(sched);

            auto * gf = lctx.graph_init();

            auto res = build_graph_defrag(lctx.get_cparams(), lctx.get_ctx_compute(), gf);

            ggml_backend_sched_alloc_graph(sched, gf);

            res->set_inputs(nullptr);

            lctx.graph_compute(gf, false);

            need_reserve = true;
        }

        do_defrag = false;
    }

    return need_reserve;
}
  279. void llama_kv_cache_unified::defrag_sched(float thold) {
  280. // - do not defrag small contexts (i.e. < 2048 tokens)
  281. // - count the padding towards the number of used tokens
  282. const float fragmentation = n >= 2048 ? std::max(0.0f, 1.0f - (float(cells.get_used() + n_pad)/n)) : 0.0f;
  283. // queue defragmentation for next llama_kv_cache_update
  284. if (fragmentation > thold) {
  285. LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation);
  286. do_defrag = true;
  287. }
  288. }
  289. void llama_kv_cache_unified::set_full() {
  290. n = cells.size();
  291. // when simulating a full KV cache, the specific value of the "head" pointer is not important because it does not
  292. // affect the shapes of the tensors in the compute graph - it only affects the offsets of the K/V views.
  293. // we should only guarantee that the head position won't cause out-of-bounds view of the K, V tensors, so
  294. // setting it to 0 is the simplest way to achieve that
  295. // ref: https://github.com/ggml-org/llama.cpp/issues/13359
  296. head = 0;
  297. }
  298. llama_sbatch llama_kv_cache_unified::sbatch_init(const llama_batch & batch, bool logits_all) {
  299. return llama_sbatch(batch, hparams.n_embd, true, logits_all);
  300. }
  301. llama_ubatch llama_kv_cache_unified::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const {
  302. GGML_UNUSED(embd_pooled);
  303. return sbatch.split_simple(n_ubatch);
  304. }
// Find a contiguous run of `ubatch.n_tokens` empty cells, starting the search at `head`.
// On success: records the previous cell state on the recovery stack, assigns
// positions/sequence ids to the claimed cells, and updates the active window `n`.
// Returns false when no slot fits.
bool llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) {
    const uint32_t n_tokens = ubatch.n_tokens;

    // if we have enough unused cells before the current head ->
    //   better to start searching from the beginning of the cache, hoping to fill it
    if (head > cells.get_used() + 2*ubatch.n_tokens) {
        head = 0;
    }

    // otherwise, one cell per token.
    if (n_tokens > cells.size()) {
        LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %u\n", __func__, n_tokens, cells.size());
        return false;
    }

//#define FIND_SLOT_DEBUG 1
#if FIND_SLOT_DEBUG
    // NOTE(review): this disabled debug block references `n`, `used` and `size`
    // directly - it likely predates the `cells` refactor; verify before enabling
    LLAMA_LOG_WARN("begin: n = %5d, used = %5d, head = %5d, n_swa = %5d\n", n, used, head, n_swa);

    // for debugging
    {
        std::string ss;
        if (n_swa > 0) {
            for (uint32_t i = 0; i < size; ++i) {
                if (cells.is_empty(i)) {
                    ss += '.';
                } else {
                    ss += 'x';
                }
                if (i%256 == 255) {
                    ss += '\n';
                }
            }
        }
        LLAMA_LOG_WARN("\n%s\n", ss.c_str());
    }
#endif

    uint32_t n_tested = 0;

    // linear scan with wrap-around for n_tokens consecutive empty cells
    while (true) {
        // not enough room before the end of the cache - wrap to the beginning
        if (head + n_tokens > cells.size()) {
            n_tested += cells.size() - head;
            head = 0;
            continue;
        }

        bool found = true;
        for (uint32_t i = 0; i < n_tokens; i++) {
            // TODO: improve to accept cells that are masked by the SWA
            if (!cells.is_empty(head + i)) {
                // jump past the occupied cell and keep scanning
                found = false;
                head += i + 1;
                n_tested += i + 1;
                break;
            }
        }

        if (found) {
            break;
        }

        // every cell has been tested at least once -> no slot exists
        if (n_tested >= cells.size()) {
            //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
            return false;
        }
    }

    // store the old state of the cells in the recovery stack
    // (restore() rolls this back; commit() discards it)
    recovery.states.push_back({head, cells.cp(head, n_tokens)});

    // claim the slot: assign positions and sequence ids to the new cells
    for (uint32_t i = 0; i < n_tokens; ++i) {
        cells.pos_set(head + i, ubatch.pos[i]);

        for (int32_t j = 0; j < ubatch.n_seq_id[i]; j++) {
            cells.seq_add(head + i, ubatch.seq_id[i][j]);
        }
    }

    // a heuristic, to avoid attending the full cache if it is not yet utilized
    // after enough generations, the benefit from this heuristic disappears
    // if we start defragmenting the cache, the benefit from this will be more important
    n = std::min(cells.size(), std::max(n_pad, GGML_PAD(cells.used_max_p1(), n_pad)));

#ifdef FIND_SLOT_DEBUG
    LLAMA_LOG_WARN("end: n = %5d, used = %5d, head = %5d, n_swa = %5d\n", n, used, head, n_swa);
#endif

    return true;
}
  380. bool llama_kv_cache_unified::get_can_shift() const {
  381. return true;
  382. }
  383. uint32_t llama_kv_cache_unified::get_n() const {
  384. return n;
  385. }
  386. uint32_t llama_kv_cache_unified::get_size() const {
  387. return cells.size();
  388. }
// Non-owning 3D view over layer `il`'s K cache, limited to the first `n` cells:
// shape [n_embd_head_k, n_head_kv, n].
ggml_tensor * llama_kv_cache_unified::get_k(ggml_context * ctx, int32_t il) const {
    const int32_t ikv = map_layer_ids.at(il); // throws if the layer was filtered out

    auto * k = layers[ikv].k;

    return ggml_view_3d(ctx, k,
            hparams.n_embd_head_k, hparams.n_head_kv(il), n,
            ggml_row_size(k->type, hparams.n_embd_head_k),    // stride between heads
            ggml_row_size(k->type, hparams.n_embd_k_gqa(il)), // stride between cells
            0);
}
// Non-owning 3D view over layer `il`'s V cache, limited to the first `n` cells.
// The layout differs depending on whether the cache stores V transposed
// (v_trans, the non-flash-attention case).
ggml_tensor * llama_kv_cache_unified::get_v(ggml_context * ctx, int32_t il) const {
    const int32_t ikv = map_layer_ids.at(il); // throws if the layer was filtered out

    auto * v = layers[ikv].v;

    if (!v_trans) {
        // note: v->nb[1] <= v->nb[2]
        // row-major: [n_embd_head_v, n_head_kv, n]
        return ggml_view_3d(ctx, v,
                hparams.n_embd_head_v, hparams.n_head_kv(il), n,
                ggml_row_size(v->type, hparams.n_embd_head_v),    // v->nb[1]
                ggml_row_size(v->type, hparams.n_embd_v_gqa(il)), // v->nb[2]
                0);
    }

    // note: v->nb[1] > v->nb[2]
    // transposed: [n, n_head_kv, n_embd_head_v]
    return ggml_view_3d(ctx, v,
            n, hparams.n_head_kv(il), hparams.n_embd_head_v,
            ggml_row_size(v->type, v->ne[1]*hparams.n_embd_head_v), // v->nb[1]
            ggml_row_size(v->type, v->ne[1]),                       // v->nb[2]
            0);
}
  416. ggml_tensor * llama_kv_cache_unified::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const {
  417. const int32_t ikv = map_layer_ids.at(il);
  418. auto * k = layers[ikv].k;
  419. const int64_t n_tokens = k_cur->ne[2];
  420. ggml_tensor * k_view = ggml_view_1d(ctx, k,
  421. n_tokens*hparams.n_embd_k_gqa(il),
  422. ggml_row_size(k->type, hparams.n_embd_k_gqa(il))*head);
  423. return ggml_cpy(ctx, k_cur, k_view);
  424. }
// Build a graph op copying the batch's V values (`v_cur`) into the cache at `head`,
// handling both the row-major and the transposed (v_trans) storage layouts.
ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const {
    const int32_t ikv = map_layer_ids.at(il); // throws if the layer was filtered out

    auto * v = layers[ikv].v;

    const int64_t n_tokens = v_cur->ne[2];

    // flatten the head dimension: [n_embd_v_gqa, n_tokens]
    v_cur = ggml_reshape_2d(ctx, v_cur, hparams.n_embd_v_gqa(il), n_tokens);

    ggml_tensor * v_view = nullptr;

    if (!v_trans) {
        // row-major: n_tokens contiguous cache rows starting at the current head
        v_view = ggml_view_1d(ctx, v,
                n_tokens*hparams.n_embd_v_gqa(il),
                ggml_row_size(v->type, hparams.n_embd_v_gqa(il))*head);
    } else {
        // note: the V cache is transposed when not using flash attention
        v_view = ggml_view_2d(ctx, v, n_tokens, hparams.n_embd_v_gqa(il),
                (v->ne[1])*ggml_element_size(v),
                ( head)*ggml_element_size(v));

        // transpose the source to match the destination layout
        v_cur = ggml_transpose(ctx, v_cur);
    }

    return ggml_cpy(ctx, v_cur, v_view);
}
  444. void llama_kv_cache_unified::prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax) {
  445. // no pruning is needed when the cache does not use SWA
  446. GGML_ASSERT(swa_type != LLAMA_SWA_TYPE_NONE && "do not prune non-SWA cache");
  447. int n_attended = 0;
  448. for (uint32_t i = 0; i < cells.size(); ++i) {
  449. if (!cells.seq_has(i, seq_id)) {
  450. continue;
  451. }
  452. const llama_pos p0 = cells.pos_get(i);
  453. if (p0 <= pmin && !is_masked_swa(p0, pmin)) {
  454. n_attended++;
  455. }
  456. if (is_masked_swa(p0, pmax)) {
  457. cells.seq_rm(i, seq_id);
  458. }
  459. }
  460. if (n_attended < std::min<int>(n_swa, pmin)) {
  461. LLAMA_LOG_WARN("%s: partial SWA cache detected - possible loss of information, pmin = %d, n_attended = %d, n_swa = %d\n", __func__, pmin, n_attended, n_swa);
  462. }
  463. }
// Fill the KQ attention mask tensor `dst` (host F32) for the given micro-batch:
// 0 (or an ALiBi slope term) where attention is allowed, -INF where it is masked.
void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
    const int64_t n_tokens     = ubatch->n_tokens;
    const int64_t n_seq_tokens = ubatch->n_seq_tokens;
    const int64_t n_seqs       = ubatch->n_seqs;

    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
    float * data = (float *) dst->data;

    const int64_t n_kv = n;

    // Use only the previous KV cells of the correct sequence for each token of the ubatch.
    // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
    // Example with a cache of 10 tokens, 2 tokens populated in cache and 3 tokens in batch:
    //   Causal mask:
    //      xxx-------
    //      xxxx------
    //      xxxxx-----
    //   Non-causal mask:
    //      xxxxx-----
    //      xxxxx-----
    //      xxxxx-----
    // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615
    for (int h = 0; h < 1; ++h) {
        for (int s = 0; s < n_seqs; ++s) {
            // all tokens of one ubatch sequence share the same (first) sequence id
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            for (int j = 0; j < n_seq_tokens; ++j) {
                const llama_pos p1 = ubatch->pos[s*n_seq_tokens + j];

                for (int i = 0; i < n_kv; ++i) {
                    float f = 0.0f;

                    bool masked = false;

                    if (cells.is_empty(i)) {
                        masked = true;
                    } else {
                        const llama_pos p0 = cells.pos_get(i);

                        // mask the token if not the same sequence
                        masked = masked || (!cells.seq_has(i, seq_id));

                        // mask future tokens
                        masked = masked || (causal_attn && p0 > p1);

                        // apply SWA if any
                        masked = masked || (is_masked_swa(p0, p1));

                        if (!masked && hparams.use_alibi) {
                            // ALiBi: linear penalty by distance instead of 0
                            f = -std::abs(p0 - p1);
                        }
                    }

                    if (masked) {
                        f = -INFINITY;
                    }

                    data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
                }
            }
        }

        // mask padded tokens (rows beyond n_tokens up to the padded row count)
        if (data) {
            for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
                for (int j = 0; j < n_kv; ++j) {
                    data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
                }
            }
        }
    }
}
  522. void llama_kv_cache_unified::set_input_k_shift(ggml_tensor * dst) const {
  523. GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
  524. int32_t * data = (int32_t *) dst->data;
  525. for (uint32_t i = 0; i < cells.size(); ++i) {
  526. data[i] = cells.is_empty(i) ? 0 : cells.get_shift(i);
  527. }
  528. }
  529. void llama_kv_cache_unified::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const {
  530. const int64_t n_tokens = ubatch->n_tokens;
  531. GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
  532. GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing
  533. int32_t * data = (int32_t *) dst->data;
  534. const int64_t n_kv = n;
  535. for (int h = 0; h < 1; ++h) {
  536. for (int j = 0; j < n_tokens; ++j) {
  537. for (int i = 0; i < n_kv; ++i) {
  538. // the position when the cells is empty is irrelevant - it will be masked out later in the attention
  539. const llama_pos p0 = cells.is_empty(i) ? -1 : cells.pos_get(i);
  540. data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(p0, ubatch->pos[j], hparams.n_rel_attn_bkts, false);
  541. }
  542. }
  543. }
  544. }
  545. size_t llama_kv_cache_unified::total_size() const {
  546. size_t size = 0;
  547. for (const auto & buf : bufs) {
  548. size += ggml_backend_buffer_get_size(buf.get());
  549. }
  550. return size;
  551. }
  552. size_t llama_kv_cache_unified::size_k_bytes() const {
  553. size_t size_k_bytes = 0;
  554. for (const auto & layer : layers) {
  555. size_k_bytes += ggml_nbytes(layer.k);
  556. }
  557. return size_k_bytes;
  558. }
  559. size_t llama_kv_cache_unified::size_v_bytes() const {
  560. size_t size_v_bytes = 0;
  561. for (const auto & layer : layers) {
  562. size_v_bytes += ggml_nbytes(layer.v);
  563. }
  564. return size_v_bytes;
  565. }
// Build a graph op that re-applies RoPE to `cur` using the per-cell `shift`
// deltas (and optional frequency `factors`). Quantized caches are dequantized
// to F32, rotated, and written back; non-quantized caches are rotated in place.
ggml_tensor * llama_kv_cache_unified::build_rope_shift(
        const llama_cparams & cparams,
               ggml_context * ctx,
                ggml_tensor * cur,
                ggml_tensor * shift,
                ggml_tensor * factors,
                      float   freq_base,
                      float   freq_scale) const {
    const auto & n_ctx_orig = cparams.n_ctx_orig_yarn;

    const auto & yarn_ext_factor = cparams.yarn_ext_factor;
    const auto & yarn_beta_fast  = cparams.yarn_beta_fast;
    const auto & yarn_beta_slow  = cparams.yarn_beta_slow;

    const auto & n_rot     = hparams.n_rot;
    const auto & rope_type = hparams.rope_type;

    // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly.
    // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
    const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor;

    ggml_tensor * tmp;

    if (ggml_is_quantized(cur->type)) {
        // dequantize to f32 -> RoPE -> quantize back
        tmp = ggml_cast(ctx, cur, GGML_TYPE_F32);

        tmp = ggml_rope_ext(ctx, tmp,
                shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);

        // write the rotated values back into the quantized cache tensor
        tmp = ggml_cpy(ctx, tmp, cur);
    } else {
        // we rotate only the first n_rot dimensions
        tmp = ggml_rope_ext_inplace(ctx, cur,
                shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
    }

    return tmp;
}
// Graph input that feeds the per-cell K-shift amounts into the shift graph
// built by llama_kv_cache_unified::build_graph_shift().
class llm_graph_input_k_shift : public llm_graph_input_i {
public:
    llm_graph_input_k_shift(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
    virtual ~llm_graph_input_k_shift() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * k_shift; // I32 [kv_size]

    const llama_kv_cache_unified * kv_self; // cache that supplies the shift values
};
  607. void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) {
  608. GGML_UNUSED(ubatch);
  609. if (k_shift) {
  610. kv_self->set_input_k_shift(k_shift);
  611. }
  612. }
// Build a graph that applies the pending position shifts to the K cache of
// every layer. The shift amounts arrive at compute time through the
// llm_graph_input_k_shift input registered on the returned result.
llm_graph_result_ptr llama_kv_cache_unified::build_graph_shift(
        const llama_cparams & cparams,
               ggml_context * ctx,
                ggml_cgraph * gf) const {
    auto res = std::make_unique<llm_graph_result>();

    const auto & n_embd_head_k = hparams.n_embd_head_k;
    //const auto & n_embd_head_v = hparams.n_embd_head_v;

    //GGML_ASSERT(kv_self->size == n_ctx);

    auto inp = std::make_unique<llm_graph_input_k_shift>(this);

    inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, cparams.n_ctx);
    ggml_set_input(inp->k_shift);

    for (const auto & layer : layers) {
        const uint32_t il = layer.il;

        const int64_t n_head_kv    = hparams.n_head_kv(il);
        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);

        // RoPE parameters can differ per layer (e.g. per-layer frequency scaling)
        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);

        ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

        // view the layer's K cache as [n_embd_head_k, n_head_kv, n_cells]
        ggml_tensor * k =
            ggml_view_3d(ctx, layer.k,
                n_embd_head_k, n_head_kv, cells.size(),
                ggml_row_size(layer.k->type, n_embd_head_k),
                ggml_row_size(layer.k->type, n_embd_k_gqa),
                0);

        ggml_tensor * cur = build_rope_shift(cparams, ctx, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l);

        ggml_build_forward_expand(gf, cur);
    }

    res->add_input(std::move(inp));

    return res;
}
// Build a graph that moves KV cells according to the plan computed by
// defrag_prepare() (defrag_info.ids maps cell i -> its destination).
// Contiguous runs of moves are coalesced into single 2D-view copies.
llm_graph_result_ptr llama_kv_cache_unified::build_graph_defrag(
        const llama_cparams & cparams,
               ggml_context * ctx,
                ggml_cgraph * gf) const {
    auto res = std::make_unique<llm_graph_result>();

    const auto & ids = defrag_info.ids;

#if 0
    // CPU defrag
    //
    // TODO: optimizations are possible:
    //       - multiple threads
    //       - avoid copying to the host memory when already there
    //
    // likely not worth the effort, as we have ggml_graph based defrag
    //

    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();

    const uint32_t kv_size = size;

    std::vector<uint8_t> buf_k;
    std::vector<uint8_t> buf_v;

    for (uint32_t il = 0; il < n_layer; ++il) {
        const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
        const size_t k_size     = ggml_row_size(k_l[il]->type, n_embd_k_gqa*kv_size);

        const size_t v_size_el = ggml_type_size(v_l[il]->type);
        const size_t v_size    = ggml_row_size (v_l[il]->type, n_embd_v_gqa*kv_size);

        buf_k.resize(k_size);
        buf_v.resize(v_size);

        ggml_backend_tensor_get(k_l[il], buf_k.data(), 0, buf_k.size());
        ggml_backend_tensor_get(v_l[il], buf_v.data(), 0, buf_v.size());

        // batch move [i, i+nm) to [id, id+nm)
        // note: cells can move only to a lower index
        for (uint32_t i = 0; i < n_kv; ++i) {
            const uint32_t id = ids[i];

            if (i == id || id == n_kv) {
                continue;
            }

            uint32_t nm = 1;

            while (i + nm < n_kv && ids[i + nm] == id + nm) {
                nm++;
            }

            // move keys
            {
                const int64_t os =  i*k_size_row;
                const int64_t od = id*k_size_row;

                memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row);
            }

            // move values (note: they are transposed)
            {
                const int64_t os =  i;
                const int64_t od = id;

                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el);
                }
            }

            i += nm - 1;
        }

        ggml_backend_tensor_set(k_l[il], buf_k.data(), 0, buf_k.size());
        ggml_backend_tensor_set(v_l[il], buf_v.data(), 0, buf_v.size());
    }
#else
    for (uint32_t i = 0; i < ids.size(); ++i) {
        const uint32_t id = ids[i];

        // ids[i] == i (stays) or ids[i] == ids.size() (empty) -> no move
        if (i == id || id == ids.size()) {
            continue;
        }

        // extend the move to cover the following cells that map to consecutive destinations
        uint32_t nm = 1;

        while (i + nm < ids.size() && ids[i + nm] == id + nm) {
            nm++;
        }

        for (const auto & layer : layers) {
            const uint32_t il = layer.il;

            const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
            const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);

            ggml_tensor * view_k_src = ggml_view_2d(ctx, layer.k,
                    n_embd_k_gqa, nm,
                    ggml_row_size(layer.k->type, n_embd_k_gqa),
                    ggml_row_size(layer.k->type, n_embd_k_gqa*i));

            ggml_tensor * view_k_dst = ggml_view_2d(ctx, layer.k,
                    n_embd_k_gqa, nm,
                    ggml_row_size(layer.k->type, n_embd_k_gqa),
                    ggml_row_size(layer.k->type, n_embd_k_gqa*id));

            ggml_tensor * view_v_src;
            ggml_tensor * view_v_dst;

            if (cparams.flash_attn) {
                // NOTE: the V cache is not transposed when using flash attention
                view_v_src = ggml_view_2d(ctx, layer.v,
                        n_embd_v_gqa, nm,
                        ggml_row_size(layer.v->type, n_embd_v_gqa),
                        ggml_row_size(layer.v->type, n_embd_v_gqa*i));

                view_v_dst = ggml_view_2d(ctx, layer.v,
                        n_embd_v_gqa, nm,
                        ggml_row_size(layer.v->type, n_embd_v_gqa),
                        ggml_row_size(layer.v->type, n_embd_v_gqa*id));
            } else {
                // transposed V cache: a "row" spans all cells, so the copy is
                // nm columns out of each of the n_embd_v_gqa rows
                view_v_src = ggml_view_2d(ctx, layer.v,
                        nm, n_embd_v_gqa,
                        ggml_row_size(layer.v->type, cells.size()),
                        ggml_row_size(layer.v->type, i));

                view_v_dst = ggml_view_2d(ctx, layer.v,
                        nm, n_embd_v_gqa,
                        ggml_row_size(layer.v->type, cells.size()),
                        ggml_row_size(layer.v->type, id));
            }

            ggml_build_forward_expand(gf, ggml_cpy(ctx, view_k_src, view_k_dst));
            ggml_build_forward_expand(gf, ggml_cpy(ctx, view_v_src, view_v_dst));
        }

        // skip the cells we just coalesced into this move
        i += nm - 1;
    }

    //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes);
#endif

    return res;
}
// Plan a defragmentation of the cache: fill holes (empty cells) with data taken
// from the end of the used region, writing the plan into defrag_info.ids
// (cell i moves to ids[i]; ids[i] == i or ids[i] == n_kv means "no move").
// Returns false when there is nothing to move. `n_max_nodes` bounds the number
// of graph nodes the subsequent build_graph_defrag() may produce.
bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) {
    const uint32_t n_layer = layers.size();

    const uint32_t n_kv   = cells.used_max_p1();
    const uint32_t n_used = cells.get_used();

    assert(n_used <= n_kv);

    //const int64_t t_start = ggml_time_us();

    // number of cells moved
    uint32_t n_moves = 0;

    // each move requires 6*n_layer tensors (see graph_build_kv_self_defrag)
    //   - source view, destination view, copy operation
    //   - x2 for keys and values
    //const uint32_t max_moves = max_nodes()/(6*n_layer);
    // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
    const uint32_t max_moves = (n_max_nodes - 2*n_layer)/(6*n_layer);

    // determine which KV cells to move where
    //
    //  cell i moves to ids[i]
    //
    //  if ids[i] == i || ids[i] == n_kv, then cell i is not moved
    //
    auto & ids = defrag_info.ids;

    ids.clear();
    ids.resize(n_kv, n_kv);

    for (uint32_t i0 = 0; i0 < n_used; ++i0) {
        if (!cells.is_empty(i0)) {
            // occupied cell stays in place
            ids[i0] = i0;

            continue;
        }

        // found a hole - fill it with data from the end of the cache

        uint32_t nh = 1;

        // determine the size of the hole
        while (i0 + nh < n_used && cells.is_empty(i0 + nh)) {
            nh++;
        }

        uint32_t nf = 0;
        uint32_t is = n_kv - 1;

        // starting from the end, find nh non-empty cells
        for (; is > i0; --is) {
            if (cells.is_empty(is) || ids[is] != n_kv) {
                continue;
            }

            // non-empty cell which is not yet moved
            nf++;

            if (nf == nh) {
                break;
            }
        }

        // this can only happen if `n_used` is not accurate, which would be a bug
        GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh");

        nf = 0;

        uint32_t i1 = is;

        // are we moving a continuous block of memory?
        bool cont = false;

        // should we stop searching for the next move?
        bool stop = false;

        // go back and move the nf cells to the hole
        for (; i1 < n_kv; ++i1) {
            if (cells.is_empty(i1) || ids[i1] != n_kv) {
                // hit an empty/already-moved cell: a new (non-contiguous) move
                // would start after this, so check the move budget first
                if (n_moves == max_moves) {
                    stop = true;
                    break;
                }

                cont = false;
                continue;
            }

            // this cell goes to (i0 + nf)
            ids[i1] = i0 + nf;

            // move the cell meta data
            cells.mv(i1, i0 + nf);

            // after compaction the first free cell is at n_used
            head = n_used;

            if (!cont) {
                n_moves++;
                cont = true;
            }

            nf++;

            if (nf == nh) {
                break;
            }
        }

        if (stop || n_moves == max_moves) {
            break;
        }

        //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh);

        i0 += nh - 1;
    }

    if (n_moves == 0) {
        return false;
    }

    LLAMA_LOG_DEBUG("%s: (tmp log) KV defrag cell moves: %u\n", __func__, n_moves);

    LLAMA_LOG_DEBUG("%s: expected gf nodes: %u\n", __func__, 6*n_moves*n_layer);

    return true;
}
  847. bool llama_kv_cache_unified::is_masked_swa(llama_pos p0, llama_pos p1) const {
  848. assert(p0 >= 0 && p1 >= 0);
  849. switch (swa_type) {
  850. case LLAMA_SWA_TYPE_NONE:
  851. {
  852. } break;
  853. case LLAMA_SWA_TYPE_STANDARD:
  854. {
  855. if (p1 - p0 >= (int32_t) n_swa) {
  856. return true;
  857. }
  858. } break;
  859. case LLAMA_SWA_TYPE_CHUNKED:
  860. {
  861. const llama_pos pos_chunk_start = (p1 / n_swa) * n_swa;
  862. if (p0 < pos_chunk_start) {
  863. return true;
  864. }
  865. } break;
  866. }
  867. return false;
  868. }
// Serialize the cache contents for `seq_id` (or for all sequences when -1):
// first the number of matching cells, then the per-cell metadata, then the
// K/V tensor data, covering only the ranges of matching cells.
void llama_kv_cache_unified::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
    uint32_t cell_count = 0;

    // Count the number of cells with the specified seq_id
    // Find all the ranges of cells with this seq id (or all, when -1)
    uint32_t cell_range_begin = cells.size(); // sentinel: "no open range"
    for (uint32_t i = 0; i < cells.size(); ++i) {
        if (!cells.is_empty(i) && (seq_id == -1 || cells.seq_has(i, seq_id))) {
            ++cell_count;
            if (cell_range_begin == cells.size()) {
                cell_range_begin = i;
            }
        } else {
            if (cell_range_begin != cells.size()) {
                // close the currently open range
                cell_ranges.emplace_back(cell_range_begin, i);
                cell_range_begin = cells.size();
            }
        }
    }
    if (cell_range_begin != cells.size()) {
        // close a range that extends to the end of the cache
        cell_ranges.emplace_back(cell_range_begin, cells.size());
    }

    // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
    uint32_t cell_count_check = 0;
    for (const auto & range : cell_ranges) {
        cell_count_check += range.second - range.first;
    }
    GGML_ASSERT(cell_count == cell_count_check);

    io.write(&cell_count, sizeof(cell_count));

    state_write_meta(io, cell_ranges, seq_id);
    state_write_data(io, cell_ranges);
}
  901. void llama_kv_cache_unified::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
  902. uint32_t cell_count;
  903. io.read_to(&cell_count, sizeof(cell_count));
  904. bool res = true;
  905. res = res && state_read_meta(io, cell_count, seq_id);
  906. res = res && state_read_data(io, cell_count);
  907. if (!res) {
  908. if (seq_id == -1) {
  909. clear();
  910. } else {
  911. seq_rm(seq_id, -1, -1);
  912. }
  913. throw std::runtime_error("failed to restore kv cache");
  914. }
  915. }
// Write per-cell metadata for every cell in `cell_ranges`: position, number of
// sequence ids, then the ids themselves. When `seq_id` != -1, only that id is
// recorded for each cell (the others are filtered out).
void llama_kv_cache_unified::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
    for (const auto & range : cell_ranges) {
        for (uint32_t i = range.first; i < range.second; ++i) {
            std::vector<llama_seq_id> seq_ids;

            for (llama_seq_id cur = 0; cur < (int) n_seq_max; ++cur) {
                if (cur == seq_id || seq_id == -1) {
                    if (cells.seq_has(i, cur)) {
                        seq_ids.push_back(cur);
                    }
                }
            }

            const llama_pos pos      = cells.pos_get(i);
            const uint32_t  n_seq_id = seq_ids.size();

            io.write(&pos,      sizeof(pos));
            io.write(&n_seq_id, sizeof(n_seq_id));

            // NOTE(review): this loop variable shadows the `seq_id` parameter;
            // intentional here, but easy to misread
            for (const auto & seq_id : seq_ids) {
                io.write(&seq_id, sizeof(seq_id));
            }
        }
    }
}
  937. void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
  938. const uint32_t v_trans = this->v_trans ? 1 : 0;
  939. const uint32_t n_layer = layers.size();
  940. io.write(&v_trans, sizeof(v_trans));
  941. io.write(&n_layer, sizeof(n_layer));
  942. std::vector<uint8_t> tmp_buf;
  943. // Iterate and write all the keys first, each row is a cell
  944. // Get whole range at a time
  945. for (const auto & layer : layers) {
  946. const uint32_t il = layer.il;
  947. const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
  948. // Write key type
  949. const int32_t k_type_i = (int32_t)layer.k->type;
  950. io.write(&k_type_i, sizeof(k_type_i));
  951. // Write row size of key
  952. const uint64_t k_size_row = ggml_row_size(layer.k->type, n_embd_k_gqa);
  953. io.write(&k_size_row, sizeof(k_size_row));
  954. // Read each range of cells of k_size length each into tmp_buf and write out
  955. for (const auto & range : cell_ranges) {
  956. const size_t range_size = range.second - range.first;
  957. const size_t buf_size = range_size * k_size_row;
  958. io.write_tensor(layer.k, range.first * k_size_row, buf_size);
  959. }
  960. }
  961. if (!v_trans) {
  962. for (const auto & layer : layers) {
  963. const uint32_t il = layer.il;
  964. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
  965. // Write value type
  966. const int32_t v_type_i = (int32_t)layer.v->type;
  967. io.write(&v_type_i, sizeof(v_type_i));
  968. // Write row size of value
  969. const uint64_t v_size_row = ggml_row_size(layer.v->type, n_embd_v_gqa);
  970. io.write(&v_size_row, sizeof(v_size_row));
  971. // Read each range of cells of v_size length each into tmp_buf and write out
  972. for (const auto & range : cell_ranges) {
  973. const size_t range_size = range.second - range.first;
  974. const size_t buf_size = range_size * v_size_row;
  975. io.write_tensor(layer.v, range.first * v_size_row, buf_size);
  976. }
  977. }
  978. } else {
  979. // When v is transposed, we also need the element size and get the element ranges from each row
  980. const uint32_t kv_size = cells.size();
  981. for (const auto & layer : layers) {
  982. const uint32_t il = layer.il;
  983. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
  984. // Write value type
  985. const int32_t v_type_i = (int32_t)layer.v->type;
  986. io.write(&v_type_i, sizeof(v_type_i));
  987. // Write element size
  988. const uint32_t v_size_el = ggml_type_size(layer.v->type);
  989. io.write(&v_size_el, sizeof(v_size_el));
  990. // Write GQA embedding size
  991. io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
  992. // For each row, we get the element values of each cell
  993. for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
  994. // Read each range of cells of v_size_el length each into tmp_buf and write out
  995. for (const auto & range : cell_ranges) {
  996. const size_t range_size = range.second - range.first;
  997. const size_t src_offset = (range.first + j * kv_size) * v_size_el;
  998. const size_t buf_size = range_size * v_size_el;
  999. io.write_tensor(layer.v, src_offset, buf_size);
  1000. }
  1001. }
  1002. }
  1003. }
  1004. }
// Restore the per-cell metadata written by state_write_meta().
// dest_seq_id != -1: restore a single sequence into freshly found cells,
// reassigning all cells to dest_seq_id. dest_seq_id == -1: restore the whole
// cache in place, cell by cell. Returns false (without throwing) on malformed
// or incompatible input; the caller (state_read) handles the rollback.
bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
    if (dest_seq_id != -1) {
        // single sequence

        seq_rm(dest_seq_id, -1, -1);

        llama_sbatch sbatch;
        llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);

        batch.n_tokens = cell_count;

        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            uint32_t  n_seq_id;

            io.read_to(&pos,      sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            // single-sequence snapshots must have exactly one seq id per cell
            if (n_seq_id != 1) {
                LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
                return false;
            }

            // read the sequence id, but directly discard it - we will use dest_seq_id instead
            {
                llama_seq_id seq_id;
                io.read_to(&seq_id, sizeof(seq_id));
            }

            batch.pos[i]      = pos;
            batch.n_seq_id[i] = n_seq_id;
            // points at the parameter; valid because the batch is consumed by
            // find_slot() below, before this function returns
            batch.seq_id[i]   = &dest_seq_id;
        }

        if (!find_slot(batch)) {
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return false;
        }

        commit();

        // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
        // Assume that this is one contiguous block of cells
        GGML_ASSERT(head + cell_count <= cells.size());
        GGML_ASSERT(cells.pos_get(head)                  == batch.pos[0]);
        GGML_ASSERT(cells.pos_get(head + cell_count - 1) == batch.pos[cell_count - 1]);
        GGML_ASSERT(cells.seq_has(head,                  dest_seq_id));
        GGML_ASSERT(cells.seq_has(head + cell_count - 1, dest_seq_id));
    } else {
        // whole KV cache restore

        if (cell_count > cells.size()) {
            LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
            return false;
        }

        clear();

        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            uint32_t  n_seq_id;

            io.read_to(&pos,      sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            cells.pos_set(i, pos);

            for (uint32_t j = 0; j < n_seq_id; ++j) {
                llama_seq_id seq_id;
                io.read_to(&seq_id, sizeof(seq_id));

                if (seq_id < 0 || (uint32_t) seq_id >= n_seq_max) {
                    LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, n_seq_max);
                    return false;
                }

                cells.seq_add(i, seq_id);
            }
        }

        head = 0;
    }

    return true;
}
// Restore the K/V tensor data written by state_write_data(). Validates the
// stored layout (v_trans flag, layer count, per-layer types/sizes) against the
// current cache configuration and copies the data into the tensors starting at
// `head`. Returns false (without throwing) on any mismatch; the caller
// (state_read) handles the rollback.
bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
    uint32_t v_trans;
    uint32_t n_layer;

    io.read_to(&v_trans, sizeof(v_trans));
    io.read_to(&n_layer, sizeof(n_layer));

    if (n_layer != layers.size()) {
        LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, (uint32_t) layers.size());
        return false;
    }

    if (cell_count > cells.size()) {
        LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, cells.size());
        return false;
    }

    if (this->v_trans != (bool) v_trans) {
        LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
        return false;
    }

    // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
    for (const auto & layer : layers) {
        const uint32_t il = layer.il;

        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();

        // Read type of key
        int32_t k_type_i_ref;
        io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
        const int32_t k_type_i = (int32_t) layer.k->type;
        if (k_type_i != k_type_i_ref) {
            LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
            return false;
        }

        // Read row size of key
        uint64_t k_size_row_ref;
        io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
        const size_t k_size_row = ggml_row_size(layer.k->type, n_embd_k_gqa);
        if (k_size_row != k_size_row_ref) {
            LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
            return false;
        }

        if (cell_count) {
            // Read and set the keys for the whole cell range
            ggml_backend_tensor_set(layer.k, io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
        }
    }

    if (!this->v_trans) {
        // row-major V cache: same layout as the keys
        for (const auto & layer : layers) {
            const uint32_t il = layer.il;

            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

            // Read type of value
            int32_t v_type_i_ref;
            io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
            const int32_t v_type_i = (int32_t)layer.v->type;
            if (v_type_i != v_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return false;
            }

            // Read row size of value
            uint64_t v_size_row_ref;
            io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
            const size_t v_size_row = ggml_row_size(layer.v->type, n_embd_v_gqa);
            if (v_size_row != v_size_row_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
                return false;
            }

            if (cell_count) {
                // Read and set the values for the whole cell range
                ggml_backend_tensor_set(layer.v, io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
            }
        }
    } else {
        // For each layer, read the values for each cell (transposed)
        for (const auto & layer : layers) {
            const uint32_t il = layer.il;

            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

            // Read type of value
            int32_t v_type_i_ref;
            io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
            const int32_t v_type_i = (int32_t)layer.v->type;
            if (v_type_i != v_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return false;
            }

            // Read element size of value
            uint32_t v_size_el_ref;
            io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
            const size_t v_size_el = ggml_type_size(layer.v->type);
            if (v_size_el != v_size_el_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
                return false;
            }

            // Read GQA embedding size
            uint32_t n_embd_v_gqa_ref;
            io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
            if (n_embd_v_gqa != n_embd_v_gqa_ref) {
                LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
                return false;
            }

            if (cell_count) {
                // For each row in the transposed matrix, read the values for the whole cell range
                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    const size_t dst_offset = (head + j * cells.size()) * v_size_el;
                    ggml_backend_tensor_set(layer.v, io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
                }
            }
        }
    }

    return true;
}
  1175. //
  1176. // llama_kv_cache_unified_iswa
  1177. //
// Construct a dual KV cache for models with interleaved sliding-window
// attention: a full-size cache for the non-SWA layers and a (usually smaller)
// cache for the SWA layers. With `swa_full` the SWA cache is made as large as
// the base cache and pruning of out-of-window tokens is disabled.
llama_kv_cache_unified_iswa::llama_kv_cache_unified_iswa(
        const llama_model & model,
                ggml_type   type_k,
                ggml_type   type_v,
                     bool   v_trans,
                     bool   offload,
                     bool   swa_full,
                 uint32_t   kv_size,
                 uint32_t   n_seq_max,
                 uint32_t   n_batch,
                 uint32_t   n_pad) : hparams(model.hparams) {
    // route each layer to exactly one of the two caches based on is_swa(il)
    llama_kv_cache_unified::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); };
    llama_kv_cache_unified::layer_filter_cb filter_swa  = [&](int32_t il) { return  model.hparams.is_swa(il); };

    const uint32_t size_base = kv_size;

    // the SWA cache only needs to hold the window for each sequence plus one batch
    uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_batch, n_pad));

    // when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size and disable pruning
    if (swa_full) {
        LLAMA_LOG_WARN("%s: using full-size SWA cache (ref: %s)\n",
                __func__, "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");

        size_swa = size_base;
        do_prune = false;
    }

    LLAMA_LOG_INFO("%s: creating non-SWA KV cache, size = %u cells\n", __func__, size_base);

    kv_base = std::make_unique<llama_kv_cache_unified>(
            model, std::move(filter_base), type_k, type_v,
            v_trans, offload, size_base, n_seq_max, n_pad,
            0, LLAMA_SWA_TYPE_NONE);

    LLAMA_LOG_INFO("%s: creating     SWA KV cache, size = %u cells\n", __func__, size_swa);

    kv_swa = std::make_unique<llama_kv_cache_unified>(
            model, std::move(filter_swa), type_k, type_v,
            v_trans, offload, size_swa, n_seq_max, n_pad,
            hparams.n_swa, hparams.swa_type);
}
  1211. void llama_kv_cache_unified_iswa::clear() {
  1212. kv_base->clear();
  1213. kv_swa ->clear();
  1214. }
  1215. bool llama_kv_cache_unified_iswa::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
  1216. bool res = true;
  1217. res = res & kv_base->seq_rm(seq_id, p0, p1);
  1218. res = res & kv_swa ->seq_rm(seq_id, p0, p1);
  1219. return res;
  1220. }
  1221. void llama_kv_cache_unified_iswa::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
  1222. kv_base->seq_cp(seq_id_src, seq_id_dst, p0, p1);
  1223. kv_swa ->seq_cp(seq_id_src, seq_id_dst, p0, p1);
  1224. }
  1225. void llama_kv_cache_unified_iswa::seq_keep(llama_seq_id seq_id) {
  1226. kv_base->seq_keep(seq_id);
  1227. kv_swa ->seq_keep(seq_id);
  1228. }
  1229. void llama_kv_cache_unified_iswa::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
  1230. kv_base->seq_add(seq_id, p0, p1, shift);
  1231. kv_swa ->seq_add(seq_id, p0, p1, shift);
  1232. }
  1233. void llama_kv_cache_unified_iswa::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
  1234. kv_base->seq_div(seq_id, p0, p1, d);
  1235. kv_swa ->seq_div(seq_id, p0, p1, d);
  1236. }
llama_pos llama_kv_cache_unified_iswa::seq_pos_min(llama_seq_id seq_id) const {
    // the base cache is a superset of the SWA cache, so we can just check the SWA cache
    return kv_swa->seq_pos_min(seq_id);
}
llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const {
    // same reasoning as seq_pos_min: the SWA cache suffices
    return kv_swa->seq_pos_max(seq_id);
}
  1244. void llama_kv_cache_unified_iswa::restore() {
  1245. kv_base->restore();
  1246. kv_swa ->restore();
  1247. }
// Commit pending changes in both caches, then (unless disabled via swa_full)
// prune SWA-cache tokens that fell outside the attention window of the
// positions recorded in `pending` by sbatch_init().
void llama_kv_cache_unified_iswa::commit() {
    kv_base->commit();
    kv_swa ->commit();

    // slide the attention window, forgetting/pruning old tokens that are outside the window
    if (do_prune) {
        for (const auto & [seq_id, entry] : pending.pos) {
            kv_swa->prune_swa(seq_id, entry.pmin, entry.pmax);
        }
    }

    pending.clear();
}
  1259. bool llama_kv_cache_unified_iswa::update(llama_context & lctx) {
  1260. bool res = true;
  1261. res = res & kv_base->update(lctx);
  1262. res = res & kv_swa ->update(lctx);
  1263. return res;
  1264. }
  1265. void llama_kv_cache_unified_iswa::defrag_sched(float thold) {
  1266. kv_base->defrag_sched(thold);
  1267. kv_swa ->defrag_sched(thold);
  1268. }
  1269. void llama_kv_cache_unified_iswa::set_full() {
  1270. kv_base->set_full();
  1271. kv_swa ->set_full();
  1272. }
  1273. llama_sbatch llama_kv_cache_unified_iswa::sbatch_init(const llama_batch & batch, bool logits_all) {
  1274. pending.clear();
  1275. if (do_prune) {
  1276. for (int i = 0; i < batch.n_tokens; ++i) {
  1277. for (int s = 0; s < batch.n_seq_id[i]; ++s) {
  1278. const llama_seq_id seq_id = batch.seq_id[i][s];
  1279. const llama_pos pos = batch.pos[i];
  1280. if (pending.pos.find(seq_id) == pending.pos.end()) {
  1281. pending.pos[seq_id].pmin = pos;
  1282. pending.pos[seq_id].pmax = pos;
  1283. } else {
  1284. pending.pos[seq_id].pmin = std::min(pending.pos[seq_id].pmin, pos);
  1285. pending.pos[seq_id].pmax = std::max(pending.pos[seq_id].pmax, pos);
  1286. }
  1287. }
  1288. }
  1289. }
  1290. return llama_sbatch(batch, hparams.n_embd, true, logits_all);
  1291. }
// Split the next micro-batch off the sbatch; embd pooling is irrelevant here.
llama_ubatch llama_kv_cache_unified_iswa::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const {
    GGML_UNUSED(embd_pooled);
    return sbatch.split_simple(n_ubatch);
}
  1296. bool llama_kv_cache_unified_iswa::find_slot(const llama_ubatch & batch) {
  1297. bool res = true;
  1298. res = res & kv_base->find_slot(batch);
  1299. res = res & kv_swa ->find_slot(batch);
  1300. return res;
  1301. }
bool llama_kv_cache_unified_iswa::get_can_shift() const {
    // shifting is only supported when both caches have the same size
    return kv_base->get_size() == kv_swa->get_size();
}
// Serialize both caches, base first (state_read must mirror this order).
void llama_kv_cache_unified_iswa::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
    kv_base->state_write(io, seq_id);
    kv_swa ->state_write(io, seq_id);
}
// Restore both caches, base first (matches the order used by state_write).
void llama_kv_cache_unified_iswa::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
    kv_base->state_read(io, seq_id);
    kv_swa ->state_read(io, seq_id);
}
// Accessor for the non-SWA cache; ownership stays with this object.
llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_kv_base() const {
    return kv_base.get();
}
// Accessor for the SWA cache; ownership stays with this object.
llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_kv_swa() const {
    return kv_swa.get();
}
  1319. //
  1320. // llama_kv_cache_recurrent
  1321. //
// Construct a recurrent-state cache: one K and one V 1-D tensor per layer,
// each holding `kv_size` cells of per-layer recurrent state.
// Throws std::runtime_error if a ggml context or backend buffer cannot be created.
llama_kv_cache_recurrent::llama_kv_cache_recurrent(
    const llama_model & model,
    ggml_type type_k,
    ggml_type type_v,
    bool offload,
    uint32_t kv_size,
    uint32_t n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) {
    const int32_t n_layer = hparams.n_layer;

    LLAMA_LOG_INFO("%s: kv_size = %u, n_seq_max = %u, type_k = '%s', type_v = '%s', n_layer = %d\n",
        __func__, kv_size, n_seq_max, ggml_type_name(type_k), ggml_type_name(type_v), n_layer);

    head = 0;
    size = kv_size;
    used = 0;

    cells.clear();
    cells.resize(kv_size);

    // create a context for each buffer type
    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
        auto it = ctx_map.find(buft);
        if (it == ctx_map.end()) {
            ggml_init_params params = {
                // metadata only: 2 tensors (K, V) per layer; data is allocated later
                /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
                /*.mem_buffer =*/ NULL,
                /*.no_alloc =*/ true,
            };
            ggml_context * ctx = ggml_init(params);
            if (!ctx) {
                return nullptr;
            }
            // ctxs owns the context; ctx_map is only a lookup table
            ctx_map[buft] = ctx;
            ctxs.emplace_back(ctx);
            return ctx;
        }
        return it->second;
    };

    k_l.reserve(n_layer);
    v_l.reserve(n_layer);

    for (int i = 0; i < n_layer; i++) {
        // n_embd_*_s() adds the recurrent-state dimensions on top of the GQA sizes
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();

        const char * dev_name = "CPU";
        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
        if (offload) {
            // place this layer's cache on the same device as the layer itself
            auto * dev = model.dev_layer(i);
            buft = ggml_backend_dev_buffer_type(dev);
            dev_name = ggml_backend_dev_name(dev);
        }
        LLAMA_LOG_DEBUG("%s, layer %3d: dev = %s\n", __func__, i, dev_name);

        ggml_context * ctx = ctx_for_buft(buft);
        if (!ctx) {
            throw std::runtime_error("failed to create ggml context for kv cache");
        }

        ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
        ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
        ggml_format_name(k, "cache_k_l%d", i);
        ggml_format_name(v, "cache_v_l%d", i);
        k_l.push_back(k);
        v_l.push_back(v);
    }

    // allocate tensors and initialize the buffers to avoid NaNs in the padding
    for (auto it : ctx_map) {
        auto * buft = it.first;
        auto * ctx = it.second;
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
        if (!buf) {
            throw std::runtime_error("failed to allocate buffer for kv cache");
        }
        ggml_backend_buffer_clear(buf, 0);
        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
        bufs.emplace_back(buf);
    }

    {
        // report the total memory footprint of the cache
        const size_t memory_size_k = size_k_bytes();
        const size_t memory_size_v = size_v_bytes();
        LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
            (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
            ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
            ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
    }
}
  1402. void llama_kv_cache_recurrent::clear() {
  1403. for (int32_t i = 0; i < (int32_t) size; ++i) {
  1404. cells[i].pos = -1;
  1405. cells[i].seq_id.clear();
  1406. cells[i].src = -1;
  1407. cells[i].tail = -1;
  1408. }
  1409. head = 0;
  1410. used = 0;
  1411. for (auto & buf : bufs) {
  1412. ggml_backend_buffer_clear(buf.get(), 0);
  1413. }
  1414. }
// Remove the cells of sequence `seq_id` whose positions fall in [p0, p1).
// seq_id < 0 means all sequences; negative p0/p1 mean open-ended.
// Returns false when the request cannot be honored: recurrent states cannot
// be partially erased, so only all-or-nothing removals are valid.
bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    uint32_t new_head = size; // sentinel: "no cell freed"

    if (p0 < 0) {
        p0 = 0;
    }
    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // models like Mamba or RWKV can't have a state partially erased
    if (seq_id >= (int64_t) size) {
        // could be fatal
        return false;
    }
    if (0 <= seq_id) {
        int32_t & tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            const kv_cell & cell = cells[tail_id];
            // partial intersection is invalid
            if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
                return false;
            }
            // invalidate tails which will be cleared
            if (p0 <= cell.pos && cell.pos < p1) {
                tail_id = -1;
            }
        }
    } else {
        // seq_id is negative, then the range should include everything or nothing
        if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
            return false;
        }
    }

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].pos >= p0 && cells[i].pos < p1) {
            if (seq_id < 0) {
                cells[i].seq_id.clear();
            } else if (cells[i].has_seq_id(seq_id)) {
                cells[i].seq_id.erase(seq_id);
            } else {
                continue;
            }
            if (cells[i].is_empty()) {
                // keep count of the number of used cells
                if (cells[i].pos >= 0) {
                    used--;
                }
                cells[i].pos = -1;
                cells[i].src = -1;
                // remember the first freed cell
                if (new_head == size) {
                    new_head = i;
                }
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != size && new_head < head) {
        head = new_head;
    }

    return true;
}
// Make sequence `seq_id_dst` share the recurrent state (tail cell) of
// `seq_id_src`. The destination's previous state, if any, is released first.
// NOTE: p0/p1 are clamped but otherwise unused — for recurrent caches the
// copy is all-or-nothing; they exist for interface compatibility.
void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }
    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
        kv_cell & tail_src = cells[seq_id_src];
        kv_cell & tail_dst = cells[seq_id_dst];
        if (tail_dst.tail >= 0) {
            // clear destination seq_id if it wasn't empty
            kv_cell & cell_dst = cells[tail_dst.tail];
            cell_dst.seq_id.erase(seq_id_dst);
            tail_dst.tail = -1;
            if (cell_dst.seq_id.empty()) {
                // no sequence references the cell anymore -> free it
                cell_dst.pos = -1;
                cell_dst.src = -1;
                used -= 1;
            }
        }
        if (tail_src.tail >= 0) {
            // the destination now shares the source's tail cell
            kv_cell & cell_src = cells[tail_src.tail];
            cell_src.seq_id.insert(seq_id_dst);
            tail_dst.tail = tail_src.tail;
        }
    }
}
// Keep only sequence `seq_id`; all other sequences are removed from the cache.
void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) {
    uint32_t new_head = size; // sentinel: "no cell freed"

    for (uint32_t i = 0; i < size; ++i) {
        // drop the tail pointers of every other sequence
        if ((llama_seq_id) i != seq_id) {
            cells[i].tail = -1;
        }
        if (!cells[i].has_seq_id(seq_id)) {
            // cell does not belong to the kept sequence -> free it
            if (cells[i].pos >= 0) {
                used--;
            }
            cells[i].pos = -1;
            cells[i].src = -1;
            cells[i].seq_id.clear();
            if (new_head == size){
                new_head = i;
            }
        } else {
            // cell survives and now belongs exclusively to `seq_id`
            cells[i].seq_id.clear();
            cells[i].seq_id.insert(seq_id);
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != size && new_head < head) {
        head = new_head;
    }
}
// Shift the positions of sequence `seq_id` in [p0, p1) by `shift`.
void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
    if (shift == 0) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }
    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over the cache.
    if (p0 == p1) {
        return;
    }

    // for Mamba-like or RWKV models, only the pos needs to be shifted
    // (the recurrent state itself is position-independent)
    if (0 <= seq_id && seq_id < (int64_t) size) {
        const int32_t tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            kv_cell & cell = cells[tail_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos += shift;
            }
        }
    }
}
// Divide the positions of sequence `seq_id` in [p0, p1) by `d`.
// NOTE(review): d == 0 would divide by zero here — presumably callers
// validate the divisor; confirm at the call sites.
void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    if (d == 1) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }
    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over the cache.
    if (p0 == p1) {
        return;
    }

    // for Mamba-like or RWKV models, only the pos needs to be changed
    if (0 <= seq_id && seq_id < (int64_t) size) {
        const int32_t tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            kv_cell & cell = cells[tail_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos /= d;
            }
        }
    }
}
  1582. llama_pos llama_kv_cache_recurrent::seq_pos_min(llama_seq_id seq_id) const {
  1583. llama_pos result = std::numeric_limits<llama_pos>::max();
  1584. for (uint32_t i = 0; i < size; ++i) {
  1585. if (cells[i].has_seq_id(seq_id)) {
  1586. result = std::min(result, cells[i].pos);
  1587. }
  1588. }
  1589. if (result == std::numeric_limits<llama_pos>::max()) {
  1590. result = -1;
  1591. }
  1592. return result;
  1593. }
  1594. llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const {
  1595. llama_pos result = -1;
  1596. for (uint32_t i = 0; i < size; ++i) {
  1597. if (cells[i].has_seq_id(seq_id)) {
  1598. result = std::max(result, cells[i].pos);
  1599. }
  1600. }
  1601. return result;
  1602. }
  1603. void llama_kv_cache_recurrent::restore() {
  1604. if (pending.ranges.empty()) {
  1605. return;
  1606. }
  1607. seq_rm(-1, -1, -1);
  1608. }
  1609. void llama_kv_cache_recurrent::commit() {
  1610. pending.ranges.clear();
  1611. }
  1612. bool llama_kv_cache_recurrent::update(llama_context & ctx) {
  1613. GGML_UNUSED(ctx);
  1614. return false;
  1615. }
  1616. void llama_kv_cache_recurrent::defrag_sched(float thold) {
  1617. GGML_UNUSED(thold);
  1618. // noop
  1619. }
  1620. void llama_kv_cache_recurrent::set_full() {
  1621. n = size;
  1622. head = 0;
  1623. }
  1624. llama_sbatch llama_kv_cache_recurrent::sbatch_init(
  1625. const llama_batch & batch,
  1626. bool logits_all) {
  1627. return llama_sbatch(batch, hparams.n_embd, false, logits_all);
  1628. }
  1629. llama_ubatch llama_kv_cache_recurrent::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const {
  1630. if (embd_pooled) {
  1631. // Pooled embeddings cannot be split across ubatches (yet)
  1632. return sbatch.split_seq(n_ubatch);
  1633. }
  1634. return sbatch.split_equal(n_ubatch);
  1635. }
// Select and arrange cells for the sequences of `ubatch`.
// Each sequence's recurrent state lives in exactly one cell (its "tail");
// this routine makes each batch sequence own a cell exclusively and then
// reorders cells so the batch's states occupy one contiguous [head, head+n)
// range. Returns false if the batch cannot be placed.
bool llama_kv_cache_recurrent::find_slot(
       const llama_ubatch & ubatch) {
    const uint32_t n_tokens = ubatch.n_tokens;
    const uint32_t n_seqs = ubatch.n_seqs;

    const uint32_t n_seq_tokens = ubatch.n_seq_tokens;

    // if we have enough unused cells before the current head ->
    // better to start searching from the beginning of the cache, hoping to fill it
    if (head > used + 2*n_tokens) {
        head = 0;
    }

    // For recurrent state architectures (like Mamba or RWKV),
    // each cache cell can store the state for a whole sequence.
    // A slot should be always be contiguous.

    // can only process batches with an equal number of new tokens in each sequence
    GGML_ASSERT(ubatch.equal_seqs);

    int32_t min = size - 1; // lowest cell index used by this batch
    int32_t max = 0;        // highest cell index used by this batch

    // everything should fit if all seq_ids are smaller than the max
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t n_seq_id = ubatch.n_seq_id[s];
        for (uint32_t j = 0; j < n_seq_id; ++j) {
            const llama_seq_id seq_id = ubatch.seq_id[s][j];
            if (seq_id < 0 || (uint32_t) seq_id >= size) {
                // too big seq_id
                // TODO: would it be possible to resize the cache instead?
                LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max);
                return false;
            }
            if (j > 0) {
                kv_cell & seq = cells[seq_id];
                if (seq.tail >= 0) {
                    kv_cell & cell = cells[seq.tail];
                    // clear cells from seq_ids that become shared
                    // (should not normally happen, but let's handle it anyway)
                    cell.seq_id.erase(seq_id);
                    seq.tail = -1;
                    if (cell.seq_id.empty()) {
                        cell.pos = -1;
                        cell.src = -1;
                        used -= 1;
                    }
                }
            }
        }
    }

#ifndef NDEBUG
    {
        // debug-only consistency check: each sequence has at most one tail,
        // and every cell's recorded tail matches actual ownership
        std::vector<int32_t> tails_verif;
        tails_verif.assign(size, -1);
        for (uint32_t i = 0; i < size; ++i) {
            kv_cell & cell = cells[i];
            for (llama_seq_id seq_id : cell.seq_id) {
                if (tails_verif[seq_id] != -1) {
                    LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
                }
                tails_verif[seq_id] = i;
            }
        }
        for (uint32_t i = 0; i < size; ++i) {
            if (tails_verif[i] != cells[i].tail) {
                LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
            }
        }
    }
#endif

    // find next empty cell
    uint32_t next_empty_cell = head;
    for (uint32_t i = 0; i < size; ++i) {
        if (next_empty_cell >= size) { next_empty_cell -= size; } // wrap around
        kv_cell & cell = cells[next_empty_cell];
        if (cell.is_empty()) { break; }
        next_empty_cell += 1;
    }

    // find usable cell range
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const llama_seq_id seq_id = ubatch.seq_id[s][0];
        kv_cell & seq_meta = cells[seq_id];
        bool has_cell = false;
        if (seq_meta.tail >= 0) {
            kv_cell & cell = cells[seq_meta.tail];
            GGML_ASSERT(cell.has_seq_id(seq_id));
            // does this seq_id "own" the cell?
            if (cell.seq_id.size() == 1) { has_cell = true; }
        }
        if (!has_cell) {
            // the sequence shares its cell (or has none) -> move its state
            // into an empty cell that it will own exclusively
            kv_cell & empty_cell = cells[next_empty_cell];
            GGML_ASSERT(empty_cell.is_empty());
            // copy old tail into the empty cell
            if (seq_meta.tail >= 0) {
                kv_cell & orig_cell = cells[seq_meta.tail];
                empty_cell.pos = orig_cell.pos;
                empty_cell.src = orig_cell.src;
                orig_cell.seq_id.erase(seq_id);
                empty_cell.seq_id.insert(seq_id); // will be overwritten
            }
            seq_meta.tail = next_empty_cell;
            // find next empty cell
            if (s + 1 < n_seqs) {
                next_empty_cell += 1;
                for (uint32_t i = 0; i < size; ++i) {
                    if (next_empty_cell >= size) { next_empty_cell -= size; } // wrap around
                    kv_cell & cell = cells[next_empty_cell];
                    if (cell.is_empty()) { break; }
                    next_empty_cell += 1;
                }
            }
        }
        // track the range of cells touched by this batch
        if (min > seq_meta.tail) { min = seq_meta.tail; }
        if (max < seq_meta.tail) { max = seq_meta.tail; }
    }

    // gather and re-order
    for (uint32_t s = 0; s < n_seqs; ++s) {
        int32_t dst_id = s + min;
        int32_t src_id = cells[ubatch.seq_id[s][0]].tail;
        if (dst_id != src_id) {
            kv_cell & dst_cell = cells[dst_id];
            kv_cell & src_cell = cells[src_id];
            std::swap(dst_cell.pos, src_cell.pos);
            std::swap(dst_cell.src, src_cell.src);
            std::swap(dst_cell.seq_id, src_cell.seq_id);
            // swap tails (assuming they NEVER overlap)
            for (const llama_seq_id seq_id : src_cell.seq_id) {
                cells[seq_id].tail = src_id;
            }
            for (const llama_seq_id seq_id : dst_cell.seq_id) {
                cells[seq_id].tail = dst_id;
            }
        }
    }

    // update the pos of the used seqs
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
        int32_t cell_id = s + min;
        kv_cell & cell = cells[cell_id];
        if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
            // What should happen when the pos backtracks or skips a value?
            // Clearing the state mid-batch would require special-casing which isn't done.
            LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
                __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
        }
        cell.pos = last_pos;
        cell.seq_id.clear();
        for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
            const llama_seq_id seq_id = ubatch.seq_id[s][j];
            cell.seq_id.insert(seq_id);
            cells[seq_id].tail = cell_id;
        }
    }

    // allow getting the range of used cells, from head to head + n
    head = min;
    n = max - min + 1;
    used = std::count_if(cells.begin(), cells.end(),
        [](const kv_cell & cell){ return !cell.is_empty(); });

    // sanity check
    return n >= n_seqs;
}
  1792. bool llama_kv_cache_recurrent::get_can_shift() const {
  1793. return false;
  1794. }
// Return the cell index that state slot `i` (relative to `head`) should be
// copied from when building the compute graph. The src field is then reset
// so a subsequent call yields the identity mapping (copy happens only once).
int32_t llama_kv_cache_recurrent::s_copy(int i) const {
    const uint32_t cell_id = i + head;

    //////////////////////////////////////////////
    // TODO: this should not mutate the KV cache !
    kv_cell & cell = const_cast<kv_cell &>(cells[cell_id]);

    // prevent out-of-bound sources
    if (cell.src < 0 || (uint32_t) cell.src >= size) {
        cell.src = cell_id;
    }

    int32_t res = cell.src;

    // TODO: do not mutate the KV cache
    // ensure copy only happens once
    if (cell.src != (int32_t) cell_id) {
        cell.src = cell_id;
    }

    return res;
}
// Return 1.0f if state slot `i` (relative to `head`) has a valid source,
// 0.0f otherwise. Slots without a source are then marked valid so the mask
// is applied only once.
float llama_kv_cache_recurrent::s_mask(int i) const {
    const uint32_t cell_id = i + head;

    //////////////////////////////////////////////
    // TODO: this should not mutate the KV cache !
    kv_cell & cell = const_cast<kv_cell &>(cells[cell_id]);

    float res = (float) (cell.src >= 0);

    // only clear once
    if (cell.src < 0) {
        cell.src = cell_id;
    }

    return res;
}
  1824. uint32_t llama_kv_cache_recurrent::cell_max() const {
  1825. for (uint32_t i = size; i > 0; --i) {
  1826. const kv_cell & cell = cells[i - 1];
  1827. if (cell.pos >= 0 && !cell.is_empty()) {
  1828. return i;
  1829. }
  1830. }
  1831. return 0;
  1832. }
  1833. size_t llama_kv_cache_recurrent::total_size() const {
  1834. size_t size = 0;
  1835. for (const auto & buf : bufs) {
  1836. size += ggml_backend_buffer_get_size(buf.get());
  1837. }
  1838. return size;
  1839. }
  1840. size_t llama_kv_cache_recurrent::size_k_bytes() const {
  1841. size_t size_k_bytes = 0;
  1842. for (const auto & k : k_l) {
  1843. size_k_bytes += ggml_nbytes(k);
  1844. }
  1845. return size_k_bytes;
  1846. }
  1847. size_t llama_kv_cache_recurrent::size_v_bytes() const {
  1848. size_t size_v_bytes = 0;
  1849. for (const auto & v : v_l) {
  1850. size_v_bytes += ggml_nbytes(v);
  1851. }
  1852. return size_v_bytes;
  1853. }
// Serialize the cache state for sequence `seq_id` (-1 = all non-empty cells):
// writes the cell count, then per-cell metadata, then the tensor data.
void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
    uint32_t cell_count = 0;

    // Count the number of cells with the specified seq_id
    // Find all the ranges of cells with this seq id (or all, when -1)
    uint32_t cell_range_begin = size; // sentinel: "not currently inside a range"
    for (uint32_t i = 0; i < size; ++i) {
        const auto & cell = cells[i];
        if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
            ++cell_count;
            if (cell_range_begin == size) {
                cell_range_begin = i;
            }
        } else {
            if (cell_range_begin != size) {
                cell_ranges.emplace_back(cell_range_begin, i);
                cell_range_begin = size;
            }
        }
    }
    // close the trailing range, if any
    if (cell_range_begin != size) {
        cell_ranges.emplace_back(cell_range_begin, size);
    }

    // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
    uint32_t cell_count_check = 0;
    for (const auto & range : cell_ranges) {
        cell_count_check += range.second - range.first;
    }
    GGML_ASSERT(cell_count == cell_count_check);

    io.write(&cell_count, sizeof(cell_count));

    state_write_meta(io, cell_ranges, seq_id);
    state_write_data(io, cell_ranges);
}
  1887. void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
  1888. uint32_t cell_count;
  1889. io.read_to(&cell_count, sizeof(cell_count));
  1890. bool res = true;
  1891. res = res && state_read_meta(io, cell_count, seq_id);
  1892. res = res && state_read_data(io, cell_count);
  1893. if (!res) {
  1894. if (seq_id == -1) {
  1895. clear();
  1896. } else {
  1897. seq_rm(seq_id, -1, -1);
  1898. }
  1899. throw std::runtime_error("failed to restore kv cache");
  1900. }
  1901. }
// Write per-cell metadata (pos + seq_id list) for every cell in `cell_ranges`.
// When writing a single sequence (seq_id != -1) the seq_id list is omitted
// (n_seq_id == 0): the reader assigns its own destination sequence.
void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
    for (const auto & range : cell_ranges) {
        for (uint32_t i = range.first; i < range.second; ++i) {
            const auto & cell = cells[i];
            const llama_pos pos = cell.pos;
            const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;

            io.write(&pos, sizeof(pos));
            io.write(&n_seq_id, sizeof(n_seq_id));

            if (n_seq_id) {
                for (auto seq_id : cell.seq_id) {
                    io.write(&seq_id, sizeof(seq_id));
                }
            }
        }
    }
}
  1918. void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
  1919. const uint32_t v_trans = 0;
  1920. const uint32_t n_layer = hparams.n_layer;
  1921. io.write(&v_trans, sizeof(v_trans));
  1922. io.write(&n_layer, sizeof(n_layer));
  1923. std::vector<uint8_t> tmp_buf;
  1924. // Iterate and write all the keys first, each row is a cell
  1925. // Get whole range at a time
  1926. for (uint32_t il = 0; il < n_layer; ++il) {
  1927. const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
  1928. // Write key type
  1929. const int32_t k_type_i = (int32_t)k_l[il]->type;
  1930. io.write(&k_type_i, sizeof(k_type_i));
  1931. // Write row size of key
  1932. const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
  1933. io.write(&k_size_row, sizeof(k_size_row));
  1934. // Read each range of cells of k_size length each into tmp_buf and write out
  1935. for (const auto & range : cell_ranges) {
  1936. const size_t range_size = range.second - range.first;
  1937. const size_t buf_size = range_size * k_size_row;
  1938. io.write_tensor(k_l[il], range.first * k_size_row, buf_size);
  1939. }
  1940. }
  1941. if (!v_trans) {
  1942. for (uint32_t il = 0; il < n_layer; ++il) {
  1943. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
  1944. // Write value type
  1945. const int32_t v_type_i = (int32_t)v_l[il]->type;
  1946. io.write(&v_type_i, sizeof(v_type_i));
  1947. // Write row size of value
  1948. const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa);
  1949. io.write(&v_size_row, sizeof(v_size_row));
  1950. // Read each range of cells of v_size length each into tmp_buf and write out
  1951. for (const auto & range : cell_ranges) {
  1952. const size_t range_size = range.second - range.first;
  1953. const size_t buf_size = range_size * v_size_row;
  1954. io.write_tensor(v_l[il], range.first * v_size_row, buf_size);
  1955. }
  1956. }
  1957. } else {
  1958. // When v is transposed, we also need the element size and get the element ranges from each row
  1959. const uint32_t kv_size = size;
  1960. for (uint32_t il = 0; il < n_layer; ++il) {
  1961. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
  1962. // Write value type
  1963. const int32_t v_type_i = (int32_t)v_l[il]->type;
  1964. io.write(&v_type_i, sizeof(v_type_i));
  1965. // Write element size
  1966. const uint32_t v_size_el = ggml_type_size(v_l[il]->type);
  1967. io.write(&v_size_el, sizeof(v_size_el));
  1968. // Write GQA embedding size
  1969. io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
  1970. // For each row, we get the element values of each cell
  1971. for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
  1972. // Read each range of cells of v_size_el length each into tmp_buf and write out
  1973. for (const auto & range : cell_ranges) {
  1974. const size_t range_size = range.second - range.first;
  1975. const size_t src_offset = (range.first + j * kv_size) * v_size_el;
  1976. const size_t buf_size = range_size * v_size_el;
  1977. io.write_tensor(v_l[il], src_offset, buf_size);
  1978. }
  1979. }
  1980. }
  1981. }
  1982. }
// Restore per-cell metadata previously produced by state_write_meta().
// dest_seq_id != -1: the stream is a single sequence -> place it into dest_seq_id.
// dest_seq_id == -1: full-cache restore -> clear and rebuild everything.
// Returns false on malformed input (the caller rolls back and throws).
bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
    if (dest_seq_id != -1) {
        // single sequence

        seq_rm(dest_seq_id, -1, -1);

        // build a synthetic ubatch so find_slot() can allocate the cells
        llama_sbatch sbatch;
        llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);

        batch.n_tokens = cell_count;
        batch.n_seq_tokens = cell_count;
        batch.n_seqs = 1;

        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            uint32_t n_seq_id;

            io.read_to(&pos, sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            // single-sequence streams must not carry explicit seq_ids
            if (n_seq_id != 0) {
                LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
                return false;
            }

            batch.pos[i] = pos;
        }
        batch.n_seq_id[0] = 1;
        batch.seq_id[0] = &dest_seq_id;
        if (!find_slot(batch)) {
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return false;
        }
        commit();

        // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
        // Assume that this is one contiguous block of cells
        GGML_ASSERT(head + cell_count <= size);
        GGML_ASSERT(cells[head].pos == batch.pos[0]);
        GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]);
        GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
        GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
    } else {
        // whole KV cache restore

        if (cell_count > size) {
            LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
            return false;
        }

        clear();

        for (uint32_t i = 0; i < cell_count; ++i) {
            kv_cell & cell = cells[i];

            llama_pos pos;
            uint32_t n_seq_id;

            io.read_to(&pos, sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            cell.pos = pos;

            for (uint32_t j = 0; j < n_seq_id; ++j) {
                llama_seq_id seq_id;
                io.read_to(&seq_id, sizeof(seq_id));

                // TODO: llama_kv_cache_recurrent should have a notion of max sequences
                //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
                if (seq_id < 0) {
                    //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
                    LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id);
                    return false;
                }

                cell.seq_id.insert(seq_id);

                // each sequence may have exactly one tail cell
                int32_t & tail = cells[seq_id].tail;
                if (tail != -1) {
                    LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
                    return false;
                }
                tail = i;
            }
        }

        head = 0;
        used = cell_count;
    }

    for (uint32_t i = 0; i < cell_count; ++i) {
        uint32_t cell_id = head + i;
        // make sure the recurrent states will keep their restored state
        cells[cell_id].src = cell_id;
    }

    return true;
}
// Restore the tensor data written by state_write_data() into cells
// [head, head + cell_count). Validates layer count, tensor types and row /
// element sizes against the current model before writing anything.
// Returns false on any mismatch (the caller rolls back and throws).
bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
    uint32_t v_trans;
    uint32_t n_layer;
    io.read_to(&v_trans, sizeof(v_trans));
    io.read_to(&n_layer, sizeof(n_layer));

    if (n_layer != hparams.n_layer) {
        LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
        return false;
    }
    if (cell_count > size) {
        LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
        return false;
    }
    // recurrent caches never store V transposed
    if (false != (bool) v_trans) {
        LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
        return false;
    }

    // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
    for (uint32_t il = 0; il < n_layer; ++il) {
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();

        // Read type of key
        int32_t k_type_i_ref;
        io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
        const int32_t k_type_i = (int32_t) k_l[il]->type;
        if (k_type_i != k_type_i_ref) {
            LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
            return false;
        }

        // Read row size of key
        uint64_t k_size_row_ref;
        io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
        const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
        if (k_size_row != k_size_row_ref) {
            LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
            return false;
        }

        if (cell_count) {
            // Read and set the keys for the whole cell range
            ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
        }
    }

    if (!v_trans) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

            // Read type of value
            int32_t v_type_i_ref;
            io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
            const int32_t v_type_i = (int32_t)v_l[il]->type;
            if (v_type_i != v_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return false;
            }

            // Read row size of value
            uint64_t v_size_row_ref;
            io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
            const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa);
            if (v_size_row != v_size_row_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
                return false;
            }

            if (cell_count) {
                // Read and set the values for the whole cell range
                ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
            }
        }
    } else {
        // For each layer, read the values for each cell (transposed)
        // (dead branch for recurrent caches - kept for format compatibility)
        for (uint32_t il = 0; il < n_layer; ++il) {
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

            // Read type of value
            int32_t v_type_i_ref;
            io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
            const int32_t v_type_i = (int32_t)v_l[il]->type;
            if (v_type_i != v_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return false;
            }

            // Read element size of value
            uint32_t v_size_el_ref;
            io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
            const size_t v_size_el = ggml_type_size(v_l[il]->type);
            if (v_size_el != v_size_el_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
                return false;
            }

            // Read GQA embedding size
            uint32_t n_embd_v_gqa_ref;
            io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
            if (n_embd_v_gqa != n_embd_v_gqa_ref) {
                LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
                return false;
            }

            if (cell_count) {
                // For each row in the transposed matrix, read the values for the whole cell range
                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    const size_t dst_offset = (head + j * size) * v_size_el;
                    ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
                }
            }
        }
    }

    return true;
}