#include "llama-graph.h"

#include "llama-impl.h"
#include "llama-batch.h"
#include "llama-cparams.h"

#include "llama-kv-cache.h"
#include "llama-kv-cache-iswa.h"
#include "llama-memory-hybrid.h"
#include "llama-memory-recurrent.h"

#include <cassert>
#include <cmath>
#include <cstring>

void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) {
    if (ubatch->token) {
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens));
    }

    if (ubatch->embd) {
        const int64_t n_embd   = embd->ne[0];
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd));
    }
}

bool llm_graph_input_embd::can_reuse(const llm_graph_params & params) {
    bool res = true;

    res &= (!tokens && !params.ubatch.token) || (tokens && tokens->ne[0] == params.ubatch.n_tokens);
    res &= (!embd   && !params.ubatch.embd)  || (embd   && embd->ne[0]   == params.ubatch.n_tokens);

    return res;
}
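
// note: for M-RoPE (n_pos_per_embd == 4) the position buffer below is laid out as 4 contiguous
// sections of n_tokens entries each, one per rotary dimension. For text-only tokens the first
// three sections repeat the 1D position and the fourth is all zeros, e.g. for n_tokens = 3 and
// pos = {5, 6, 7} the buffer becomes {5,6,7, 5,6,7, 5,6,7, 0,0,0}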
void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) {
    if (ubatch->pos && pos) {
        const int64_t n_tokens = ubatch->n_tokens;

        if (ubatch->token && n_pos_per_embd == 4) {
            // in case we're using M-RoPE with text tokens, convert the 1D positions to 4D
            // the first 3 dims are the same, and the 4th dim is all 0
            std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd);
            // copy the first dimension
            for (int i = 0; i < n_tokens; ++i) {
                pos_data[               i] = ubatch->pos[i];
                pos_data[    n_tokens + i] = ubatch->pos[i];
                pos_data[2 * n_tokens + i] = ubatch->pos[i];
                pos_data[3 * n_tokens + i] = 0; // 4th dim is 0
            }
            ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos));
        } else {
            ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_embd*ggml_element_size(pos));
        }
    }
}

bool llm_graph_input_pos::can_reuse(const llm_graph_params & params) {
    bool res = true;

    res &= pos->ne[0] == params.ubatch.n_tokens;

    return res;
}
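
// note: the per-token attention temperature computed below follows
//   scale(pos) = 1 + f_attn_temp_scale * log(floor((pos + 1) / n_attn_temp_floor_scale) + 1)
// i.e. the scale grows logarithmically with the (floored) token position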
void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) {
    if (ubatch->pos && attn_scale) {
        const int64_t n_tokens = ubatch->n_tokens;

        std::vector<float> attn_scale_data(n_tokens, 0.0f);
        for (int i = 0; i < n_tokens; ++i) {
            const float pos = ubatch->pos[i];
            attn_scale_data[i] = std::log(
                std::floor((pos + 1.0f) / n_attn_temp_floor_scale) + 1.0
            ) * f_attn_temp_scale + 1.0;
        }

        ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*ggml_element_size(attn_scale));
    }
}
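
// note: llama_relative_position_bucket() maps the signed distance pos[i] - pos[j] into one of
// hparams.n_rel_attn_bkts buckets (exact for small distances, log-spaced for larger ones),
// in the style of the T5 relative attention bias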
void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) {
    if (pos_bucket) {
        const int64_t n_tokens = ubatch->n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer));
        GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing

        int32_t * data = (int32_t *) pos_bucket->data;

        for (int h = 0; h < 1; ++h) {
            for (int j = 0; j < n_tokens; ++j) {
                for (int i = 0; i < n_tokens; ++i) {
                    data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true);
                }
            }
        }
    }
}

void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) {
    if (pos_bucket) {
        mctx->set_input_pos_bucket(pos_bucket, ubatch);
    }
}

void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
    GGML_ASSERT(out_ids);

    const int64_t n_tokens = ubatch->n_tokens;

    GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
    int32_t * data = (int32_t *) out_ids->data;

    if (n_outputs == n_tokens) {
        for (int i = 0; i < n_tokens; ++i) {
            data[i] = i;
        }

        return;
    }

    GGML_ASSERT(ubatch->output);

    int n_outputs = 0;

    for (int i = 0; i < n_tokens; ++i) {
        if (ubatch->output[i]) {
            data[n_outputs++] = i;
        }
    }
}

bool llm_graph_input_out_ids::can_reuse(const llm_graph_params & params) {
    bool res = true;

    res &= n_outputs == params.n_outputs;

    return res;
}
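
// note: the mean tensor filled below is an [n_tokens, n_seqs_unq] selection matrix: row s holds
// 1/len(seq s) at the token positions belonging to sequence s and 0 elsewhere, so multiplying it
// with the token embeddings yields the per-sequence mean (see build_inp_mean())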
void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) {
    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs_unq   = ubatch->n_seqs_unq;

        GGML_ASSERT(mean);
        GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));

        float * data = (float *) mean->data;
        memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean));

        std::vector<uint64_t> sums(n_seqs_unq, 0);
        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];

                sums[seq_idx] += ubatch->n_seq_tokens;
            }
        }

        std::vector<float> div(n_seqs_unq, 0.0f);
        for (int s = 0; s < n_seqs_unq; ++s) {
            const uint64_t sum = sums[s];
            if (sum > 0) {
                div[s] = 1.0f/float(sum);
            }
        }

        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];

                for (int j = 0; j < n_seq_tokens; ++j) {
                    data[seq_idx*n_tokens + i + j] = div[seq_idx];
                }
            }
        }
    }
}

void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
    const int64_t n_tokens   = ubatch->n_tokens;
    const int64_t n_seqs_unq = ubatch->n_seqs_unq;

    if (cparams.embeddings && (
                cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
                cparams.pooling_type == LLAMA_POOLING_TYPE_RANK ||
                cparams.pooling_type == LLAMA_POOLING_TYPE_LAST
        )) {
        GGML_ASSERT(cls);
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));

        uint32_t * data = (uint32_t *) cls->data;
        memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));

        std::vector<int> target_pos(n_seqs_unq, -1);
        std::vector<int> target_row(n_seqs_unq, -1);

        const bool last = (
            cparams.pooling_type == LLAMA_POOLING_TYPE_LAST ||
            (cparams.pooling_type == LLAMA_POOLING_TYPE_RANK && arch == LLM_ARCH_QWEN3) // qwen3 reranking & embedding models use last token
        );

        for (int i = 0; i < n_tokens; ++i) {
            const llama_pos pos = ubatch->pos[i];

            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];

                if (
                    (target_pos[seq_idx] == -1) ||
                    ( last && pos >= target_pos[seq_idx]) ||
                    (!last && pos <  target_pos[seq_idx])
                ) {
                    target_pos[seq_idx] = pos;
                    target_row[seq_idx] = i;
                }
            }
        }

        for (int s = 0; s < n_seqs_unq; ++s) {
            if (target_row[s] >= 0) {
                data[s] = target_row[s];
            }
        }
    }
}

void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

    const int64_t n_rs = mctx->get_n_rs();

    if (s_copy) {
        GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
        int32_t * data = (int32_t *) s_copy->data;

        // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
        for (uint32_t i = 0; i < n_rs; ++i) {
            data[i] = mctx->s_copy(i);
        }
    }
}

void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

    if (cross_embd && !cross->v_embd.empty()) {
        assert(cross_embd->type == GGML_TYPE_F32);

        ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd));
    }
}
static void print_mask(float * data, int64_t n_tokens, int64_t n_kv, int64_t n_swa, llama_swa_type swa_type) {
    LLAMA_LOG_DEBUG("%s: === Attention mask ===\n", __func__);
    const char * swa_type_str = (swa_type == LLAMA_SWA_TYPE_NONE)      ? "LLAMA_SWA_TYPE_NONE" :
                                (swa_type == LLAMA_SWA_TYPE_STANDARD)  ? "LLAMA_SWA_TYPE_STANDARD" :
                                (swa_type == LLAMA_SWA_TYPE_CHUNKED)   ? "LLAMA_SWA_TYPE_CHUNKED" :
                                (swa_type == LLAMA_SWA_TYPE_SYMMETRIC) ? "LLAMA_SWA_TYPE_SYMMETRIC" : "unknown";
    LLAMA_LOG_DEBUG("%s: n_swa : %d, n_kv: %d, swa_type: %s\n", __func__, (int)n_swa, (int)n_kv, swa_type_str);
    LLAMA_LOG_DEBUG("%s: '0' = can attend, '∞' = masked\n", __func__);
    LLAMA_LOG_DEBUG("%s: Rows = query tokens, Columns = key/value tokens\n\n", __func__);

    LLAMA_LOG_DEBUG("    ");
    for (int j = 0; j < std::min((int64_t)20, n_kv); ++j) {
        LLAMA_LOG_DEBUG("%2d", j);
    }
    LLAMA_LOG_DEBUG("\n");

    for (int i = 0; i < std::min((int64_t)20, n_tokens); ++i) {
        LLAMA_LOG_DEBUG(" %2d ", i);
        for (int j = 0; j < std::min((int64_t)20, n_kv); ++j) {
            float val = data[i * n_kv + j];
            if (val == -INFINITY) {
                LLAMA_LOG_DEBUG(" ∞");
            } else {
                LLAMA_LOG_DEBUG(" 0");
            }
        }
        LLAMA_LOG_DEBUG("\n");
    }
}
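
// note: KQ mask convention: 0.0f means the query token may attend to the KV token, -INFINITY
// means it is masked out. With ALiBi (hparams.use_alibi) the unmasked entries instead hold
// -|pos_k - pos_q|, which ggml_soft_max_ext() turns into the per-head linear bias via the
// f_max_alibi_bias parameter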
void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
    const int64_t n_kv     = ubatch->n_tokens;
    const int64_t n_tokens = ubatch->n_tokens;

    GGML_ASSERT(kq_mask);
    GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));

    float * data = (float *) kq_mask->data;

    // [TAG_NO_CACHE_ISWA]
    GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "TODO: implement");

    for (int h = 0; h < 1; ++h) {
        for (int i1 = 0; i1 < n_tokens; ++i1) {
            const llama_seq_id s1 = ubatch->seq_id[i1][0];

            for (int i0 = 0; i0 < n_tokens; ++i0) {
                float f = -INFINITY;

                for (int s = 0; s < ubatch->n_seq_id[i0]; ++s) {
                    const llama_seq_id s0 = ubatch->seq_id[i0][s];

                    if (s0 != s1) {
                        continue; // skip different sequences
                    }

                    if (cparams.causal_attn && ubatch->pos[i0] > ubatch->pos[i1]) {
                        continue; // skip future tokens for causal attention
                    }

                    // TODO: this does not take into account that some layers are SWA and others are not (i.e. iSWA) [TAG_NO_CACHE_ISWA]
                    //if (hparams.is_masked_swa(ubatch->pos[i0], ubatch->pos[i1])) {
                    //    continue; // skip masked tokens for SWA
                    //}

                    // TODO: reimplement this like in llama_kv_cache_unified
                    if (hparams.use_alibi) {
                        f = -std::abs(ubatch->pos[i0] - ubatch->pos[i1]);
                    } else {
                        f = 0.0f;
                    }
                }

                data[h*(n_kv*n_tokens) + i1*n_kv + i0] = f;
            }
        }
    }

    if (debug) {
        print_mask(data, n_tokens, n_kv, hparams.n_swa, hparams.swa_type);
    }
}
void llm_graph_input_attn_kv::set_input(const llama_ubatch * ubatch) {
    mctx->set_input_k_idxs(self_k_idxs, ubatch);
    mctx->set_input_v_idxs(self_v_idxs, ubatch);

    mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
}

bool llm_graph_input_attn_kv::can_reuse(const llm_graph_params & params) {
    const auto * mctx = static_cast<const llama_kv_cache_context *>(params.mctx);

    this->mctx = mctx;

    bool res = true;

    res &= self_k_idxs->ne[0] == params.ubatch.n_tokens;
  //res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there

    res &= self_kq_mask->ne[0] == mctx->get_n_kv();
    res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);

    return res;
}

void llm_graph_input_attn_kv_iswa::set_input(const llama_ubatch * ubatch) {
    mctx->get_base()->set_input_k_idxs(self_k_idxs, ubatch);
    mctx->get_base()->set_input_v_idxs(self_v_idxs, ubatch);

    mctx->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);

    mctx->get_swa()->set_input_k_idxs(self_k_idxs_swa, ubatch);
    mctx->get_swa()->set_input_v_idxs(self_v_idxs_swa, ubatch);

    mctx->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn);
}

bool llm_graph_input_attn_kv_iswa::can_reuse(const llm_graph_params & params) {
    const auto * mctx = static_cast<const llama_kv_cache_iswa_context *>(params.mctx);

    this->mctx = mctx;

    bool res = true;

    res &= self_k_idxs->ne[0] == params.ubatch.n_tokens;
  //res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there

    res &= self_k_idxs_swa->ne[0] == params.ubatch.n_tokens;
  //res &= self_v_idxs_swa->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there

    res &= self_kq_mask->ne[0] == mctx->get_base()->get_n_kv();
    res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);

    res &= self_kq_mask_swa->ne[0] == mctx->get_swa()->get_n_kv();
    res &= self_kq_mask_swa->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);

    return res;
}

void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
    GGML_ASSERT(cross_kq_mask);

    const int64_t n_enc    = cross_kq_mask->ne[0];
    const int64_t n_tokens = ubatch->n_tokens;

    GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer));
    GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing

    float * data = (float *) cross_kq_mask->data;

    for (int h = 0; h < 1; ++h) {
        for (int i = 0; i < n_tokens; ++i) {
            for (int j = 0; j < n_enc; ++j) {
                float f = -INFINITY;

                for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
                    const llama_seq_id seq_id = ubatch->seq_id[i][s];

                    if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) {
                        f = 0.0f;
                    }
                }

                data[h*(n_enc*n_tokens) + i*n_enc + j] = f;
            }
        }

        for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
            for (int j = 0; j < n_enc; ++j) {
                data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY;
            }
        }
    }
}

void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
    inp_attn->set_input(ubatch);
    inp_rs->set_input(ubatch);
}

//
// llm_graph_result
//

llm_graph_result::llm_graph_result(int64_t max_nodes) : max_nodes(max_nodes) {
    reset();

    const char * LLAMA_GRAPH_RESULT_DEBUG = getenv("LLAMA_GRAPH_RESULT_DEBUG");
    debug = LLAMA_GRAPH_RESULT_DEBUG ? atoi(LLAMA_GRAPH_RESULT_DEBUG) : 0;
}

int64_t llm_graph_result::get_max_nodes() const {
    return max_nodes;
}
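
// note: buf_compute_meta only holds tensor and graph *metadata* (the context is created with
// no_alloc = true), so it is sized as the per-tensor overhead times max_nodes plus the overhead
// of the graph structure itself; the actual tensor data is allocated later by the scheduler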
void llm_graph_result::reset() {
    t_tokens      = nullptr;
    t_logits      = nullptr;
    t_embd        = nullptr;
    t_embd_pooled = nullptr;

    params = {};

    inputs.clear();

    buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false));

    ggml_init_params params = {
        /*.mem_size   =*/ buf_compute_meta.size(),
        /*.mem_buffer =*/ buf_compute_meta.data(),
        /*.no_alloc   =*/ true,
    };

    ctx_compute.reset(ggml_init(params));

    gf = ggml_new_graph_custom(ctx_compute.get(), max_nodes, false);
}

void llm_graph_result::set_inputs(const llama_ubatch * ubatch) {
    for (auto & input : inputs) {
        input->set_input(ubatch);
    }
}

bool llm_graph_result::can_reuse(const llm_graph_params & params) {
    if (!this->params.allow_reuse(params)) {
        if (debug > 1) {
            LLAMA_LOG_DEBUG("%s: cannot reuse graph due to incompatible graph parameters\n", __func__);
        }

        return false;
    }

    if (debug > 1) {
        LLAMA_LOG_DEBUG("%s: checking compatibility of %d inputs:\n", __func__, (int) inputs.size());
    }

    bool res = true;

    for (auto & input : inputs) {
        const bool cur = input->can_reuse(params);
        if (debug > 1) {
            LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", __func__, cur);
        }
        res = res && cur;
    }

    if (debug > 0) {
        LLAMA_LOG_DEBUG("%s: can reuse graph = %d\n", __func__, res);
    }

    return res;
}
llm_graph_input_i * llm_graph_result::add_input(llm_graph_input_ptr input) {
    inputs.emplace_back(std::move(input));
    return inputs.back().get();
}

void llm_graph_result::set_params(const llm_graph_params & params) {
    this->params = params;
}

//
// llm_graph_context
//

llm_graph_context::llm_graph_context(const llm_graph_params & params) :
    arch          (params.arch),
    hparams       (params.hparams),
    cparams       (params.cparams),
    ubatch        (params.ubatch),
    n_embd        (hparams.n_embd),
    n_layer       (hparams.n_layer),
    n_rot         (hparams.n_rot),
    n_ctx         (cparams.n_ctx),
    n_head        (hparams.n_head()),
    n_head_kv     (hparams.n_head_kv()),
    n_embd_head_k (hparams.n_embd_head_k),
    n_embd_k_gqa  (hparams.n_embd_k_gqa()),
    n_embd_head_v (hparams.n_embd_head_v),
    n_embd_v_gqa  (hparams.n_embd_v_gqa()),
    n_expert      (hparams.n_expert),
    n_expert_used (cparams.warmup ? hparams.n_expert : hparams.n_expert_used),
    freq_base     (cparams.rope_freq_base),
    freq_scale    (cparams.rope_freq_scale),
    ext_factor    (cparams.yarn_ext_factor),
    attn_factor   (cparams.yarn_attn_factor),
    beta_fast     (cparams.yarn_beta_fast),
    beta_slow     (cparams.yarn_beta_slow),
    norm_eps      (hparams.f_norm_eps),
    norm_rms_eps  (hparams.f_norm_rms_eps),
    n_tokens      (ubatch.n_tokens),
    n_outputs     (params.n_outputs),
    n_ctx_orig    (cparams.n_ctx_orig_yarn),
    pooling_type  (cparams.pooling_type),
    rope_type     (hparams.rope_type),
    sched         (params.sched),
    backend_cpu   (params.backend_cpu),
    cvec          (params.cvec),
    loras         (params.loras),
    mctx          (params.mctx),
    cross         (params.cross),
    cb_func       (params.cb),
    res           (params.res),
    ctx0          (res->get_ctx()),
    gf            (res->get_gf()) {
    res->set_params(params);
}

void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
    if (cb_func) {
        cb_func(ubatch, cur, name, il);
    }
}

ggml_tensor * llm_graph_context::build_cvec(
         ggml_tensor * cur,
                 int   il) const {
    return cvec->apply_to(ctx0, cur, il);
}
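
// note: LoRA applies a low-rank update on top of the base matmul:
//   res = W @ x + scale * (B @ (A @ x)),   scale = adapter_scale * alpha / rank
// (see the explicit alpha/rank computation in build_lora_mm_id() below)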
ggml_tensor * llm_graph_context::build_lora_mm(
         ggml_tensor * w,
         ggml_tensor * cur) const {
    ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);

    for (const auto & lora : *loras) {
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
        if (lw == nullptr) {
            continue;
        }

        const float adapter_scale = lora.second;
        const float scale = lw->get_scale(lora.first->alpha, adapter_scale);

        ggml_tensor * ab_cur = ggml_mul_mat(
                ctx0, lw->b,
                ggml_mul_mat(ctx0, lw->a, cur)
                );

        ab_cur = ggml_scale(ctx0, ab_cur, scale);
        res = ggml_add(ctx0, res, ab_cur);
    }

    return res;
}

ggml_tensor * llm_graph_context::build_lora_mm_id(
         ggml_tensor * w,   // ggml_tensor * as
         ggml_tensor * cur, // ggml_tensor * b
         ggml_tensor * ids) const {
    ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);

    for (const auto & lora : *loras) {
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
        if (lw == nullptr) {
            continue;
        }

        const float alpha = lora.first->alpha;
        const float rank  = (float) lw->b->ne[0];
        const float scale = alpha ? lora.second * alpha / rank : lora.second;

        ggml_tensor * ab_cur = ggml_mul_mat_id(
                ctx0, lw->b,
                ggml_mul_mat_id(ctx0, lw->a, cur, ids),
                ids
                );

        ab_cur = ggml_scale(ctx0, ab_cur, scale);
        res = ggml_add(ctx0, res, ab_cur);
    }

    return res;
}

ggml_tensor * llm_graph_context::build_norm(
         ggml_tensor * cur,
         ggml_tensor * mw,
         ggml_tensor * mb,
       llm_norm_type   type,
                 int   il) const {
    switch (type) {
        case LLM_NORM:     cur = ggml_norm    (ctx0, cur, hparams.f_norm_eps);     break;
        case LLM_NORM_RMS: cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps); break;
        case LLM_NORM_GROUP:
            {
                cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]);
                cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps);
                cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[2]);
            } break;
    }

    if (mw || mb) {
        cb(cur, "norm", il);
    }

    if (mw) {
        cur = ggml_mul(ctx0, cur, mw);
        if (mb) {
            cb(cur, "norm_w", il);
        }
    }

    if (mb) {
        cur = ggml_add(ctx0, cur, mb);
    }

    return cur;
}
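
// note: build_ffn() covers both plain and gated FFN variants; with LLM_FFN_PAR the gate branch
// runs on the FFN input in parallel with the up branch (e.g. SwiGLU: silu(gate(x)) * up(x)),
// while LLM_FFN_SEQ applies the gate to the output of the up projection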
ggml_tensor * llm_graph_context::build_ffn(
         ggml_tensor * cur,
         ggml_tensor * up,
         ggml_tensor * up_b,
         ggml_tensor * up_s,
         ggml_tensor * gate,
         ggml_tensor * gate_b,
         ggml_tensor * gate_s,
         ggml_tensor * down,
         ggml_tensor * down_b,
         ggml_tensor * down_s,
         ggml_tensor * act_scales,
     llm_ffn_op_type   type_op,
   llm_ffn_gate_type   type_gate,
                 int   il) const {
    ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur;
    cb(tmp, "ffn_up", il);

    if (up_b) {
        tmp = ggml_add(ctx0, tmp, up_b);
        cb(tmp, "ffn_up_b", il);
    }

    if (up_s) {
        tmp = ggml_mul(ctx0, tmp, up_s);
        cb(tmp, "ffn_up_s", il);
    }

    if (gate) {
        switch (type_gate) {
            case LLM_FFN_SEQ:
                {
                    cur = build_lora_mm(gate, tmp);
                    cb(cur, "ffn_gate", il);
                } break;
            case LLM_FFN_PAR:
                {
                    cur = build_lora_mm(gate, cur);
                    cb(cur, "ffn_gate", il);
                } break;
        }

        if (gate_b) {
            cur = ggml_add(ctx0, cur, gate_b);
            cb(cur, "ffn_gate_b", il);
        }

        if (gate_s) {
            cur = ggml_mul(ctx0, cur, gate_s);
            cb(cur, "ffn_gate_s", il);
        }
    } else {
        cur = tmp;
    }

    switch (type_op) {
        case LLM_FFN_SILU:
            if (gate && type_gate == LLM_FFN_PAR) {
                cur = ggml_swiglu_split(ctx0, cur, tmp);
                cb(cur, "ffn_swiglu", il);
                type_gate = LLM_FFN_SEQ;
            } else {
                cur = ggml_silu(ctx0, cur);
                cb(cur, "ffn_silu", il);
            } break;
        case LLM_FFN_GELU:
            if (gate && type_gate == LLM_FFN_PAR) {
                cur = ggml_geglu_split(ctx0, cur, tmp);
                cb(cur, "ffn_geglu", il);
                type_gate = LLM_FFN_SEQ;
            } else {
                cur = ggml_gelu(ctx0, cur);
                cb(cur, "ffn_gelu", il);
                if (act_scales != NULL) {
                    cur = ggml_div(ctx0, cur, act_scales);
                    cb(cur, "ffn_act", il);
                }
            } break;
        case LLM_FFN_RELU:
            if (gate && type_gate == LLM_FFN_PAR) {
                cur = ggml_reglu_split(ctx0, cur, tmp);
                cb(cur, "ffn_reglu", il);
                type_gate = LLM_FFN_SEQ;
            } else {
                cur = ggml_relu(ctx0, cur);
                cb(cur, "ffn_relu", il);
            } break;
        case LLM_FFN_RELU_SQR:
            {
                cur = ggml_relu(ctx0, cur);
                cb(cur, "ffn_relu", il);

                cur = ggml_sqr(ctx0, cur);
                cb(cur, "ffn_sqr(relu)", il);
            } break;
        case LLM_FFN_SWIGLU:
            {
                cur = ggml_swiglu(ctx0, cur);
                cb(cur, "ffn_swiglu", il);
            } break;
        case LLM_FFN_GEGLU:
            {
                cur = ggml_geglu(ctx0, cur);
                cb(cur, "ffn_geglu", il);
            } break;
        case LLM_FFN_REGLU:
            {
                cur = ggml_reglu(ctx0, cur);
                cb(cur, "ffn_reglu", il);
            } break;
        default:
            GGML_ABORT("fatal error");
    }

    if (gate && type_gate == LLM_FFN_PAR) {
        cur = ggml_mul(ctx0, cur, tmp);
        cb(cur, "ffn_gate_par", il);
    }

    if (down) {
        cur = build_lora_mm(down, cur);
        if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) {
            // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
        }
    }

    if (down_b) {
        cb(cur, "ffn_down", il);
    }

    if (down_b) {
        cur = ggml_add(ctx0, cur, down_b);
    }

    if (down_s) {
        cur = ggml_mul(ctx0, cur, down_s);
        cb(cur, "ffn_down_s", il);
    }

    return cur;
}
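
// note: MoE routing below proceeds in four steps: (1) router logits -> probabilities via the
// configured gating function, (2) top-k expert selection (optionally on bias-adjusted probs),
// (3) per-token expert weights (optionally softmax-ed or renormalized over the selected k),
// (4) expert FFNs evaluated only for the selected experts via ggml_mul_mat_id and combined as
// a weighted sum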
ggml_tensor * llm_graph_context::build_moe_ffn(
         ggml_tensor * cur,
         ggml_tensor * gate_inp,
         ggml_tensor * up_exps,
         ggml_tensor * gate_exps,
         ggml_tensor * down_exps,
         ggml_tensor * exp_probs_b,
             int64_t   n_expert,
             int64_t   n_expert_used,
     llm_ffn_op_type   type_op,
                bool   norm_w,
                bool   scale_w,
               float   w_scale,
        llama_expert_gating_func_type gating_op,
                 int   il,
         ggml_tensor * probs_in) const {
    return build_moe_ffn(
            cur,
            gate_inp,  /* gate_inp_b  */ nullptr,
            up_exps,   /* up_exps_b   */ nullptr,
            gate_exps, /* gate_exps_b */ nullptr,
            down_exps, /* down_exps_b */ nullptr,
            exp_probs_b,
            n_expert,
            n_expert_used,
            type_op,
            norm_w,
            scale_w,
            w_scale,
            gating_op,
            il,
            probs_in
            );
}

ggml_tensor * llm_graph_context::build_moe_ffn(
         ggml_tensor * cur,
         ggml_tensor * gate_inp,
         ggml_tensor * gate_inp_b,
         ggml_tensor * up_exps,
         ggml_tensor * up_exps_b,
         ggml_tensor * gate_exps,
         ggml_tensor * gate_exps_b,
         ggml_tensor * down_exps,
         ggml_tensor * down_exps_b,
         ggml_tensor * exp_probs_b,
             int64_t   n_expert,
             int64_t   n_expert_used,
     llm_ffn_op_type   type_op,
                bool   norm_w,
                bool   scale_w,
               float   w_scale,
        llama_expert_gating_func_type gating_op,
                 int   il,
         ggml_tensor * probs_in) const {
    const int64_t n_embd   = cur->ne[0];
    const int64_t n_tokens = cur->ne[1];
    const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; // for llama4, we apply the sigmoid-ed weights before the FFN

    ggml_tensor * logits = nullptr;

    if (probs_in == nullptr) {
        logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens]
        cb(logits, "ffn_moe_logits", il);
    } else {
        logits = probs_in;
    }

    if (gate_inp_b) {
        logits = ggml_add(ctx0, logits, gate_inp_b);
        cb(logits, "ffn_moe_logits_biased", il);
    }

    ggml_tensor * probs = nullptr;
    switch (gating_op) {
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX:
            {
                probs = ggml_soft_max(ctx0, logits); // [n_expert, n_tokens]
            } break;
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
            {
                probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens]
            } break;
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT:
            {
                probs = logits; // [n_expert, n_tokens]
            } break;
        default:
            GGML_ABORT("fatal error");
    }
    cb(probs, "ffn_moe_probs", il);

    // add experts selection bias - introduced in DeepSeek V3
    // leave probs unbiased as it's later used to get expert weights
    ggml_tensor * selection_probs = probs;
    if (exp_probs_b != nullptr) {
        selection_probs = ggml_add(ctx0, probs, exp_probs_b);
        cb(selection_probs, "ffn_moe_probs_biased", il);
    }

    // llama4 doesn't have exp_probs_b, and sigmoid is only used after top_k
    // see: https://github.com/meta-llama/llama-models/blob/699a02993512fb36936b1b0741e13c06790bcf98/models/llama4/moe.py#L183-L198
    if (arch == LLM_ARCH_LLAMA4) {
        selection_probs = logits;
    }

    // select experts
    ggml_tensor * selected_experts = ggml_top_k(ctx0, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
    cb(selected_experts->src[0], "ffn_moe_argsort", il);
    cb(selected_experts, "ffn_moe_topk", il);

    ggml_tensor * weights = ggml_get_rows(ctx0,
            ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
    cb(weights, "ffn_moe_weights", il);

    if (gating_op == LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT) {
        weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);
        weights = ggml_soft_max(ctx0, weights); // [n_expert_used, n_tokens]
        weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
        cb(weights, "ffn_moe_weights_softmax", il);
    }

    if (norm_w) {
        weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);

        ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens]
        cb(weights_sum, "ffn_moe_weights_sum", il);

        weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens]
        cb(weights, "ffn_moe_weights_norm", il);

        weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
    }
    if (scale_w) {
        weights = ggml_scale(ctx0, weights, w_scale);
        cb(weights, "ffn_moe_weights_scaled", il);
    }

    cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens);

    if (weight_before_ffn) {
        // repeat cur to [n_embd, n_expert_used, n_tokens]
        ggml_tensor * repeated = ggml_repeat_4d(ctx0, cur, n_embd, n_expert_used, n_tokens, 1);
        cur = ggml_mul(ctx0, repeated, weights);
        cb(cur, "ffn_moe_weighted", il);
    }

    ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
    cb(up, "ffn_moe_up", il);

    if (up_exps_b) {
        up = ggml_add_id(ctx0, up, up_exps_b, selected_experts);
        cb(up, "ffn_moe_up_biased", il);
    }

    ggml_tensor * experts = nullptr;
    if (gate_exps) {
        cur = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
        cb(cur, "ffn_moe_gate", il);
    } else {
        cur = up;
    }

    if (gate_exps_b) {
        cur = ggml_add_id(ctx0, cur, gate_exps_b, selected_experts);
        cb(cur, "ffn_moe_gate_biased", il);
    }

    switch (type_op) {
        case LLM_FFN_SILU:
            if (gate_exps) {
                cur = ggml_swiglu_split(ctx0, cur, up);
                cb(cur, "ffn_moe_swiglu", il);
            } else {
                cur = ggml_silu(ctx0, cur);
                cb(cur, "ffn_moe_silu", il);
            } break;
        case LLM_FFN_GELU:
            if (gate_exps) {
                cur = ggml_geglu_split(ctx0, cur, up);
                cb(cur, "ffn_moe_geglu", il);
            } else {
                cur = ggml_gelu(ctx0, cur);
                cb(cur, "ffn_moe_gelu", il);
            } break;
        case LLM_FFN_SWIGLU_OAI_MOE:
            {
                // TODO: move to hparams?
                constexpr float alpha = 1.702f;
                constexpr float limit = 7.0f;
                cur = ggml_swiglu_oai(ctx0, cur, up, alpha, limit);
                cb(cur, "ffn_moe_swiglu_oai", il);
            } break;
        case LLM_FFN_RELU:
            if (gate_exps) {
                cur = ggml_reglu_split(ctx0, cur, up);
                cb(cur, "ffn_moe_reglu", il);
            } else {
                cur = ggml_relu(ctx0, cur);
                cb(cur, "ffn_moe_relu", il);
            } break;
        default:
            GGML_ABORT("fatal error");
    }

    experts = build_lora_mm_id(down_exps, cur, selected_experts); // [n_embd, n_expert_used, n_tokens]
    cb(experts, "ffn_moe_down", il);

    if (down_exps_b) {
        experts = ggml_add_id(ctx0, experts, down_exps_b, selected_experts);
        cb(experts, "ffn_moe_down_biased", il);
    }

    if (!weight_before_ffn) {
        experts = ggml_mul(ctx0, experts, weights);
        cb(cur, "ffn_moe_weighted", il);
    }

    ggml_tensor * cur_experts[LLAMA_MAX_EXPERTS] = { nullptr };

    assert(n_expert_used > 0);

    // order the views before the adds
    for (uint32_t i = 0; i < hparams.n_expert_used; ++i) {
        cur_experts[i] = ggml_view_2d(ctx0, experts, n_embd, n_tokens, experts->nb[2], i*experts->nb[1]);

        ggml_build_forward_expand(gf, cur_experts[i]);
    }

    // aggregate experts
    // note: here we explicitly use hparams.n_expert_used instead of n_expert_used
    //       to avoid potentially a large number of add nodes during warmup
    //       ref: https://github.com/ggml-org/llama.cpp/pull/14753
    ggml_tensor * moe_out = cur_experts[0];

    for (uint32_t i = 1; i < hparams.n_expert_used; ++i) {
        moe_out = ggml_add(ctx0, moe_out, cur_experts[i]);
    }

    if (hparams.n_expert_used == 1) {
        // avoid returning a non-contiguous tensor
        moe_out = ggml_cont(ctx0, moe_out);
    }

    cb(moe_out, "ffn_moe_out", il);

    return moe_out;
}
// input embeddings with optional lora
ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
    const int64_t n_embd = hparams.n_embd;

    auto inp = std::make_unique<llm_graph_input_embd>();

    ggml_tensor * cur = nullptr;

    if (ubatch.token) {
        inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens);
        //cb(inp->tokens, "inp_tokens", -1);
        ggml_set_input(inp->tokens);
        res->t_tokens = inp->tokens;

        cur = ggml_get_rows(ctx0, tok_embd, inp->tokens);

        // apply lora for embedding tokens if needed
        for (const auto & lora : *loras) {
            llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd);
            if (lw == nullptr) {
                continue;
            }

            const float adapter_scale = lora.second;
            const float scale = lw->get_scale(lora.first->alpha, adapter_scale);

            ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat(
                        ctx0, lw->b, // non-transposed lora_b
                        ggml_get_rows(ctx0, lw->a, inp->tokens)
                        ), scale);

            cur = ggml_add(ctx0, cur, inpL_delta);
        }
    } else {
        inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
        ggml_set_input(inp->embd);

        cur = inp->embd;
    }

    // For Granite architecture
    if (hparams.f_embedding_scale != 0.0f) {
        cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale);
    }

    cb(cur, "inp_embd", -1);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_pos() const {
    auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd());

    auto & cur = inp->pos;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd());
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}
ggml_tensor * llm_graph_context::build_inp_attn_scale() const {
    auto inp = std::make_unique<llm_graph_input_attn_temp>(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale);

    auto & cur = inp->attn_scale;

    // this needs to be 1x1xN for broadcasting
    cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 1, 1, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_out_ids() const {
    // note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls,
    //       but this would make the graph topology depend on the number of output tokens, which can interfere with
    //       features that require constant topology such as pipeline parallelism
    //       ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471
    //if (n_outputs < n_tokens) {
    //    return nullptr;
    //}

    auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);

    auto & cur = inp->out_ids;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}
ggml_tensor * llm_graph_context::build_inp_mean() const {
    auto inp = std::make_unique<llm_graph_input_mean>(cparams);

    auto & cur = inp->mean;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_cls() const {
    auto inp = std::make_unique<llm_graph_input_cls>(cparams, arch);

    auto & cur = inp->cls;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_cross_embd() const {
    auto inp = std::make_unique<llm_graph_input_cross_embd>(cross);

    auto & cur = inp->cross_embd;

    // if we have the output embeddings from the encoder, use them directly
    // TODO: needs more work to be correct, for now just use the tensor shape
    //if (cross->t_embd) {
    //    cur = ggml_view_tensor(ctx0, cross->t_embd);
    //    return cur;
    //}

    const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd;
    const auto n_enc  = !cross->v_embd.empty() ? cross->n_enc  : hparams.n_ctx_train;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const {
    auto inp = std::make_unique<llm_graph_input_pos_bucket>(hparams);

    auto & cur = inp->pos_bucket;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const {
    const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx);

    auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, mctx_cur);

    const auto n_kv = mctx_cur->get_n_kv();

    auto & cur = inp->pos_bucket;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}
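
// note: the relative position bias below is gathered per (key, query) bucket id from attn_rel_b
// and permuted into an [n_kv, n_tokens, n_head] tensor, so it can be added to the KQ scores via
// the kq_b argument of build_attn()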
ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const {
    ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]);
    cb(pos_bucket_1d, "pos_bucket_1d", -1);

    ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d);

    pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]);
    pos_bias = ggml_permute   (ctx0, pos_bias, 2, 0, 1, 3);
    pos_bias = ggml_cont      (ctx0, pos_bias);

    cb(pos_bias, "pos_bias", -1);

    return pos_bias;
}
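
// note: build_attn_mha() computes softmax(Q K^T * kq_scale + mask [+ kq_b]) V per head, either
// fused via ggml_flash_attn_ext() (requires the KV length to be a multiple of 256 and no KQ
// bias) or via the explicit mul_mat/soft_max path; kq_scale is typically
// 1.0f/sqrtf(float(n_embd_head))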
ggml_tensor * llm_graph_context::build_attn_mha(
         ggml_tensor * q,
         ggml_tensor * k,
         ggml_tensor * v,
         ggml_tensor * kq_b,
         ggml_tensor * kq_mask,
         ggml_tensor * sinks,
         ggml_tensor * v_mla,
               float   kq_scale,
                 int   il) const {
    const bool v_trans = v->nb[1] > v->nb[2];

    // split the batch into streams if needed
    const auto n_stream = k->ne[3];

    q = ggml_view_4d(ctx0, q, q->ne[0], q->ne[1], q->ne[2]/n_stream, n_stream, q->nb[1], q->nb[2], q->nb[3]/n_stream, 0);

    q = ggml_permute(ctx0, q, 0, 2, 1, 3);
    k = ggml_permute(ctx0, k, 0, 2, 1, 3);
    v = ggml_permute(ctx0, v, 0, 2, 1, 3);

    const auto n_kv = k->ne[1];

    ggml_tensor * cur;

    // TODO: replace hardcoded padding with ggml-provided padding
    if (cparams.flash_attn && (n_kv % 256 == 0) && kq_b == nullptr) {
        GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet");

        if (v_trans) {
            v = ggml_transpose(ctx0, v);
        }

        // this can happen when KV cache is not used (e.g. an embedding model with non-causal attn)
        if (k->type == GGML_TYPE_F32) {
            k = ggml_cast(ctx0, k, GGML_TYPE_F16);
        }

        if (v->type == GGML_TYPE_F32) {
            v = ggml_cast(ctx0, v, GGML_TYPE_F16);
        }

        cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias,
                                  hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
        cb(cur, LLAMA_TENSOR_NAME_FATTN, il);

        ggml_flash_attn_ext_add_sinks(cur, sinks);
        ggml_flash_attn_ext_set_prec (cur, GGML_PREC_F32);

        if (v_mla) {
#if 0
            // v_mla can be applied as a matrix-vector multiplication with broadcasting across dimension 3 == n_tokens.
            // However, the code is optimized for dimensions 0 and 1 being large, so this is inefficient.
            cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens);
            cur = ggml_mul_mat(ctx0, v_mla, cur);
#else
            // It's preferable to do the calculation as a matrix-matrix multiplication with n_tokens in dimension 1.
            // The permutations are noops and only change how the tensor data is interpreted.
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_mul_mat(ctx0, v_mla, cur);
            cb(cur, "fattn_mla", il);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_cont(ctx0, cur); // Needed because ggml_reshape_2d expects contiguous inputs.
#endif
        }

        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
    } else {
        ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
        cb(kq, "kq", il);

        // note: this op tends to require high floating point range
        //       while for some models F16 is enough, for others it is not, so we default to F32 here
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

        if (arch == LLM_ARCH_GROK) {
            // need to do the following:
            // multiply by attn_output_multiplier
            // and then:
            // kq = 30 * tanh(kq / 30)
            // before the softmax below
            kq = ggml_tanh(ctx0, ggml_scale(ctx0, kq, hparams.f_attn_out_scale / hparams.f_attn_logit_softcapping));
            cb(kq, "kq_tanh", il);

            kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
            cb(kq, "kq_scaled", il);
        }

        if (hparams.attn_soft_cap) {
            kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping);
            cb(kq, "kq_scaled_1", il);

            kq = ggml_tanh (ctx0, kq);
            cb(kq, "kq_tanh", il);

            kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
            cb(kq, "kq_scaled_2", il);
        }

        if (kq_b) {
            kq = ggml_add(ctx0, kq, kq_b);
            cb(kq, "kq_plus_kq_b", il);
        }

        kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
        ggml_soft_max_add_sinks(kq, sinks);
        cb(kq, "kq_soft_max", il);

        if (!v_trans) {
            // note: avoid this branch
            v = ggml_cont(ctx0, ggml_transpose(ctx0, v));
            cb(v, "v_cont", il);
        }

        ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
        cb(kqv, "kqv", il);

        // for MLA with the absorption optimization, we need to "decompress" from MQA back to MHA
        if (v_mla) {
            kqv = ggml_mul_mat(ctx0, v_mla, kqv);
            cb(kqv, "kqv_mla", il);
        }

        cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);

        // recombine streams
        cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);

        if (!cparams.offload_kqv) {
            // all nodes between the KV store and the attention output are run on the CPU
            ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu);
        }
    }

    ggml_build_forward_expand(gf, cur);

    return cur;
}
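
// note: the KQ mask is allocated with its row count padded up to GGML_KQ_MASK_PAD, a padding
// requirement of ggml's flash-attention kernels; rows past n_tokens correspond to no real query
// and, where set, are filled with -INFINITY (see llm_graph_input_attn_cross::set_input() above)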
llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const {
    auto inp = std::make_unique<llm_graph_input_attn_no_cache>(hparams, cparams);

    // note: there is no KV cache, so the number of KV values is equal to the number of tokens in the batch
    inp->kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1);
    ggml_set_input(inp->kq_mask);

    inp->kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->kq_mask, GGML_TYPE_F16) : inp->kq_mask;

    return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp));
}

ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_no_cache * inp,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * sinks,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    GGML_UNUSED(n_tokens);

    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto & kq_mask = inp->get_kq_mask();

    // [TAG_NO_CACHE_PAD]
    // TODO: if ubatch.equal_seqs() == true, we can split the three tensors below into ubatch.n_seqs_unq streams
    //       but it might not be worth it: https://github.com/ggml-org/llama.cpp/pull/15636
    //assert(!ubatch.equal_seqs() || (k_cur->ne[3] == 1 && k_cur->ne[3] == ubatch.n_seqs_unq));

    ggml_tensor * q = q_cur;
    ggml_tensor * k = k_cur;
    ggml_tensor * v = v_cur;

    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        //cb(cur, "kqv_wo", il);
    }

    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}

static std::unique_ptr<llm_graph_input_attn_kv> build_attn_inp_kv_impl(
           ggml_context * ctx0,
     const llama_ubatch & ubatch,
    const llama_hparams & hparams,
    const llama_cparams & cparams,
    const llama_kv_cache_context * mctx_cur) {
    auto inp = std::make_unique<llm_graph_input_attn_kv>(hparams, cparams, mctx_cur);

    {
        GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_iswa for SWA");

        const auto n_kv     = mctx_cur->get_n_kv();
        const auto n_tokens = ubatch.n_tokens;
        const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq;

        inp->self_k_idxs = mctx_cur->build_input_k_idxs(ctx0, ubatch);
        inp->self_v_idxs = mctx_cur->build_input_v_idxs(ctx0, ubatch);

        inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
        ggml_set_input(inp->self_kq_mask);

        inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
    }

    return inp;
}

llm_graph_input_attn_kv * llm_graph_context::build_attn_inp_kv() const {
    const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx);

    auto inp = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur);

    return (llm_graph_input_attn_kv *) res->add_input(std::move(inp));
}
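
// note: with a KV cache the flow below is: write the new K/V for this ubatch into the cache at
// the slots given by k_idxs/v_idxs, then attend with Q against views over the cached K/V
// returned by get_k()/get_v(), using the cache-provided KQ mask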
ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_kv * inp,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * sinks,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto * mctx_cur = inp->mctx;

    // store to KV cache
    {
        const auto & k_idxs = inp->get_k_idxs();
        const auto & v_idxs = inp->get_v_idxs();

        ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il));
        ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il));
    }

    const auto & kq_mask = inp->get_kq_mask();

    ggml_tensor * q = q_cur;
    ggml_tensor * k = mctx_cur->get_k(ctx0, il);
    ggml_tensor * v = mctx_cur->get_v(ctx0, il);

    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
        if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) {
            // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
        }
    }

    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
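
// note: for interleaved SWA (iSWA) models there are two caches: a "base" cache for full-context
// layers and a smaller "swa" cache for sliding-window layers; hparams.is_swa(il) selects which
// cache, index tensors and KQ mask apply to the current layer. k_cur/v_cur may be null when the
// caller only wants to attend against already-cached K/V without storing new entries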
ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_kv_iswa * inp,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * sinks,
        ggml_tensor * v_mla,
        float kq_scale,
        int il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);

    if (k_cur) {
        ggml_build_forward_expand(gf, k_cur);
    }

    if (v_cur) {
        ggml_build_forward_expand(gf, v_cur);
    }

    const auto * mctx_iswa = inp->mctx;

    const bool is_swa = hparams.is_swa(il);

    const auto * mctx_cur = is_swa ? mctx_iswa->get_swa() : mctx_iswa->get_base();

    // optionally store to KV cache
    if (k_cur) {
        const auto & k_idxs = is_swa ? inp->get_k_idxs_swa() : inp->get_k_idxs();

        ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il));
    }

    if (v_cur) {
        const auto & v_idxs = is_swa ? inp->get_v_idxs_swa() : inp->get_v_idxs();

        ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il));
    }

    const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask();

    ggml_tensor * q = q_cur;
    ggml_tensor * k = mctx_cur->get_k(ctx0, il);
    ggml_tensor * v = mctx_cur->get_v(ctx0, il);

    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
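
// Routing sketch for the iSWA variant above (layer layout assumed for
// illustration): hparams.is_swa(il) picks the cache per layer, so in a model
// that interleaves three sliding-window layers with one full-attention layer,
// layers 0..2 use mctx_iswa->get_swa() with the SWA mask while layer 3 uses
// get_base() with the full mask. k_cur/v_cur may be null here, in which case
// the store step is skipped and only the existing cache contents are attended.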
llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const {
    auto inp = std::make_unique<llm_graph_input_attn_cross>(cross);

    const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;

    inp->cross_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1);
    ggml_set_input(inp->cross_kq_mask);

    inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask;

    return (llm_graph_input_attn_cross *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_cross * inp,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * sinks,
        ggml_tensor * v_mla,
        float kq_scale,
        int il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto & kq_mask = inp->get_kq_mask_cross();

    ggml_tensor * q = q_cur;
    ggml_tensor * k = k_cur;
    ggml_tensor * v = v_cur;

    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
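
// Note: unlike the self-attention variants above, this cross-attention path has
// no KV-cache interaction: k_cur/v_cur come directly from the encoder output,
// so there is no cpy_k/cpy_v step and the mask spans n_enc encoder positions.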
// TODO: maybe separate the inner implementation into a separate function,
//       like with the non-sliding-window equivalent,
//       once sliding-window hybrid caches are a thing
llm_graph_input_attn_kv_iswa * llm_graph_context::build_attn_inp_kv_iswa() const {
    const auto * mctx_cur = static_cast<const llama_kv_cache_iswa_context *>(mctx);

    auto inp = std::make_unique<llm_graph_input_attn_kv_iswa>(hparams, cparams, mctx_cur);

    const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq;

    {
        const auto n_kv = mctx_cur->get_base()->get_n_kv();

        inp->self_k_idxs = mctx_cur->get_base()->build_input_k_idxs(ctx0, ubatch);
        inp->self_v_idxs = mctx_cur->get_base()->build_input_v_idxs(ctx0, ubatch);

        inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
        ggml_set_input(inp->self_kq_mask);

        inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
    }

    {
        GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache for non-SWA");

        const auto n_kv = mctx_cur->get_swa()->get_n_kv();

        inp->self_k_idxs_swa = mctx_cur->get_swa()->build_input_k_idxs(ctx0, ubatch);
        inp->self_v_idxs_swa = mctx_cur->get_swa()->build_input_v_idxs(ctx0, ubatch);

        inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
        ggml_set_input(inp->self_kq_mask_swa);

        inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
    }

    return (llm_graph_input_attn_kv_iswa *) res->add_input(std::move(inp));
}
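
// Mask sketch for the two caches built above (sizes assumed for illustration):
// with a base cache of 4096 cells and an n_swa = 512 window, the input carries
// two index/mask pairs:
//   self_kq_mask     -> [n_kv(base), GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream]
//   self_kq_mask_swa -> [n_kv(swa),  GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream]
// and the iSWA build_attn() above selects one pair per layer via hparams.is_swa(il).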
ggml_tensor * llm_graph_context::build_rs(
        ggml_tensor * s,
        ggml_tensor * state_copy_main,
        ggml_tensor * state_copy_extra,
        int32_t state_size,
        int32_t n_seqs,
        uint32_t n_rs,
        uint32_t rs_head,
        uint32_t rs_size,
        int32_t rs_zero,
        const llm_graph_get_rows_fn & get_state_rows) const {
    ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, rs_size);

    // Clear a single state which will then be copied to the other cleared states.
    // Note that this is a no-op when the view is zero-sized.
    ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0));
    ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0));

    // copy states
    // NOTE: assuming the copy destinations are ALL contained between rs_head and rs_head + n_rs
    // {state_size, rs_size} -> {state_size, n_seqs}
    ggml_tensor * output_states = get_state_rows(ctx0, states, state_copy_main);
    ggml_build_forward_expand(gf, output_states);

    // copy extra states which won't be changed further (between n_seqs and n_rs)
    ggml_tensor * states_extra = ggml_get_rows(ctx0, states, state_copy_extra);
    ggml_build_forward_expand(gf,
        ggml_cpy(ctx0,
            states_extra,
            ggml_view_1d(ctx0, s, state_size*(n_rs - n_seqs), (rs_head + n_seqs)*state_size*ggml_element_size(s))));

    return output_states;
}
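
// The rs_zero trick above, spelled out (values assumed): when rs_zero == -1,
// both the view length state_size*(rs_zero >= 0) and the byte offset evaluate
// to 0, so ggml_scale_inplace() runs over an empty view and clears nothing;
// when rs_zero == 3, the view selects exactly row 3 of the {state_size, rs_size}
// matrix and zeroes it, and get_state_rows() can then fan that cleared row out
// to every sequence whose copy source index points at slot 3.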
static std::unique_ptr<llm_graph_input_rs> build_rs_inp_impl(
        ggml_context * ctx0,
        const llama_ubatch & ubatch,
        const llama_memory_recurrent_context * mctx_cur) {
    auto inp = std::make_unique<llm_graph_input_rs>(mctx_cur);

    const int64_t n_rs   = mctx_cur->get_n_rs();
    const int64_t n_seqs = ubatch.n_seqs;

    inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs);
    ggml_set_input(inp->s_copy);

    inp->s_copy_main  = ggml_view_1d(ctx0, inp->s_copy, n_seqs, 0);
    inp->s_copy_extra = ggml_view_1d(ctx0, inp->s_copy, n_rs - n_seqs, n_seqs * inp->s_copy->nb[0]);

    return inp;
}
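
// Layout sketch for s_copy (n_rs = 5, n_seqs = 2 assumed for illustration):
//
//   s_copy       = [c0, c1, c2, c3, c4]  // one source-slot index per cell
//   s_copy_main  = [c0, c1]              // feeds the n_seqs states being updated
//   s_copy_extra = [c2, c3, c4]          // shuffled back unchanged by build_rs()
//
// Both views alias s_copy's buffer, so a single I32 input tensor drives both
// the main gather and the write-back of the untouched extra states.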
llm_graph_input_rs * llm_graph_context::build_rs_inp() const {
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);

    auto inp = build_rs_inp_impl(ctx0, ubatch, mctx_cur);

    return (llm_graph_input_rs *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_rs(
        llm_graph_input_rs * inp,
        ggml_tensor * s,
        int32_t state_size,
        int32_t n_seqs,
        const llm_graph_get_rows_fn & get_state_rows) const {
    const auto * kv_state = inp->mctx;

    return build_rs(s, inp->s_copy_main, inp->s_copy_extra, state_size, n_seqs,
            kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(),
            get_state_rows);
}
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
        llm_graph_input_rs * inp,
        const llama_ubatch & ubatch,
        int il) const {
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);

    const auto token_shift_count = hparams.token_shift_count;

    const int64_t n_seqs = ubatch.n_seqs;

    ggml_tensor * token_shift_all = mctx_cur->get_r_l(il);

    ggml_tensor * token_shift = build_rs(
            inp, token_shift_all,
            hparams.n_embd_r(), n_seqs);

    token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);

    return token_shift;
}
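
// Shape walk-through for the load above (a sketch; token_shift_count = 2 is
// assumed, as in RWKV-style blocks with separate attention and FFN shifts,
// where n_embd_r() works out to token_shift_count*n_embd):
//   get_r_l(il)      : flat recurrent r-state for layer il
//   build_rs(...)    : {n_embd*2, n_seqs} after gathering per-sequence rows
//   ggml_reshape_3d  : {n_embd, 2, n_seqs}
// so each sequence ends up with token_shift_count shift vectors of width n_embd.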
ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
        ggml_tensor * token_shift,
        const llama_ubatch & ubatch,
        int il) const {
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);

    const auto token_shift_count = hparams.token_shift_count;
    const auto n_embd = hparams.n_embd;

    const int64_t n_seqs = ubatch.n_seqs;

    const auto kv_head = mctx_cur->get_head();

    return ggml_cpy(
        ctx0,
        ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0),
        ggml_view_1d(ctx0, mctx_cur->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(mctx_cur->get_r_l(il)))
    );
}
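
// The store is the mirror of the load above (same assumptions): the
// {n_embd, token_shift_count, n_seqs} shift tensor is viewed as a flat run of
// n_embd*n_seqs*token_shift_count elements and copied into get_r_l(il) at the
// kv_head offset, where the next ubatch's build_rwkv_token_shift_load() reads it.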
llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const {
    const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx);

    auto inp_rs   = build_rs_inp_impl(ctx0, ubatch, mctx_cur->get_recr());
    auto inp_attn = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn());

    auto inp = std::make_unique<llm_graph_input_mem_hybrid>(std::move(inp_attn), std::move(inp_rs), mctx_cur);

    return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp));
}
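
// Hybrid memory serves models that mix recurrent and attention layers: the
// recurrent and attention inputs are built against the two sub-contexts of the
// hybrid memory context and bundled into one graph input, so per-layer code can
// unpack whichever half its layer type needs.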
void llm_graph_context::build_pooling(
        ggml_tensor * cls,
        ggml_tensor * cls_b,
        ggml_tensor * cls_out,
        ggml_tensor * cls_out_b) const {
    if (!cparams.embeddings) {
        return;
    }

    ggml_tensor * inp = res->t_embd;

    //// find result_norm tensor for input
    //for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
    //    inp = ggml_graph_node(gf, i);
    //    if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) {
    //        break;
    //    }
    //
    //    inp = nullptr;
    //}

    GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor");

    ggml_tensor * cur;

    switch (pooling_type) {
        case LLAMA_POOLING_TYPE_NONE:
            {
                cur = inp;
            } break;
        case LLAMA_POOLING_TYPE_MEAN:
            {
                ggml_tensor * inp_mean = build_inp_mean();
                cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean);
            } break;
        case LLAMA_POOLING_TYPE_CLS:
        case LLAMA_POOLING_TYPE_LAST:
            {
                ggml_tensor * inp_cls = build_inp_cls();
                cur = ggml_get_rows(ctx0, inp, inp_cls);
            } break;
        case LLAMA_POOLING_TYPE_RANK:
            {
                ggml_tensor * inp_cls = build_inp_cls();
                cur = ggml_get_rows(ctx0, inp, inp_cls);

                // classification head
                // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566
                if (cls) {
                    cur = ggml_mul_mat(ctx0, cls, cur);
                    if (cls_b) {
                        cur = ggml_add(ctx0, cur, cls_b);
                    }
                    cur = ggml_tanh(ctx0, cur);
                }

                // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
                // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896
                // single-layer classification head (direct projection)
                // https://github.com/huggingface/transformers/blob/f4fc42216cd56ab6b68270bf80d811614d8d59e4/src/transformers/models/bert/modeling_bert.py#L1476
                if (cls_out) {
                    cur = ggml_mul_mat(ctx0, cls_out, cur);
                    if (cls_out_b) {
                        cur = ggml_add(ctx0, cur, cls_out_b);
                    }
                }

                // softmax for qwen3 reranker
                if (arch == LLM_ARCH_QWEN3) {
                    cur = ggml_soft_max(ctx0, cur);
                }
            } break;
        default:
            {
                GGML_ABORT("unknown pooling type");
            }
    }

    cb(cur, "result_embd_pooled", -1);
    res->t_embd_pooled = cur;

    ggml_build_forward_expand(gf, cur);
}
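
// Worked example for the MEAN branch above (sizes assumed): with n_embd = 4 and
// 3 tokens, inp is {4, 3}; ggml_transpose + ggml_cont yields {3, 4}, and
// inp_mean is a {3, n_seqs} matrix whose column for a given sequence holds 1/3
// at its token rows and 0 elsewhere, so ggml_mul_mat() produces the {4, n_seqs}
// per-sequence averages. CLS/LAST instead gather single rows with ggml_get_rows.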
int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) {
    // TODO: move to hparams if a T5 variant appears that uses a different value
    const int64_t max_distance = 128;

    if (bidirectional) {
        n_buckets >>= 1;
    }

    const int64_t max_exact = n_buckets >> 1;

    int32_t relative_position = x - y;
    int32_t relative_bucket = 0;

    if (bidirectional) {
        relative_bucket += (relative_position > 0) * n_buckets;
        relative_position = abs(relative_position);
    } else {
        relative_position = -std::min<int32_t>(relative_position, 0);
    }

    int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
    relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1);
    relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);

    return relative_bucket;
}
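
// Worked example (inputs assumed): n_buckets = 32, bidirectional = true,
// x - y = 20. Halving gives n_buckets = 16 and max_exact = 8; the positive
// direction contributes +16 and relative_position becomes |20| = 20. Since
// 20 >= max_exact, the logarithmic branch applies:
//   floorf(8 + logf(20.0f/8.0f)*(16 - 8)/logf(128.0f/8.0f)) = floorf(10.64f) = 10
// which is below the clamp n_buckets - 1 = 15, so the result is 16 + 10 = 26.
// Distances below max_exact map to buckets one-to-one instead.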