// llama-graph.cpp

#include "llama-graph.h"

#include "llama-impl.h"
#include "llama-batch.h"
#include "llama-cparams.h"

#include "llama-kv-cache-unified.h"
#include "llama-kv-cache-unified-iswa.h"
#include "llama-kv-cache-recurrent.h"

#include <cassert>
#include <cmath>
#include <cstring>
void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) {
    if (ubatch->token) {
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens));
    }

    if (ubatch->embd) {
        const int64_t n_embd   = embd->ne[0];
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd));
    }
}
void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) {
    if (ubatch->pos && pos) {
        const int64_t n_tokens = ubatch->n_tokens;

        if (ubatch->token && n_pos_per_embd == 4) {
            // in case we're using M-RoPE with text tokens, convert the 1D positions to 4D
            // the first 3 dims are the same, and the 4th dim is all 0
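            // e.g. for n_tokens = 3 with positions {5, 6, 7}, pos_data is laid out
            // section by section (not interleaved): {5, 6, 7,  5, 6, 7,  5, 6, 7,  0, 0, 0}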
            std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd);
            // copy the first dimension
            for (int i = 0; i < n_tokens; ++i) {
                pos_data[               i] = ubatch->pos[i];
                pos_data[    n_tokens + i] = ubatch->pos[i];
                pos_data[2 * n_tokens + i] = ubatch->pos[i];
                pos_data[3 * n_tokens + i] = 0; // 4th dim is 0
            }
            ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos));
        } else {
            ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_embd*ggml_element_size(pos));
        }
    }
}
void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) {
    if (ubatch->pos && attn_scale) {
        const int64_t n_tokens = ubatch->n_tokens;
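        // per-token attention temperature (used by e.g. Llama 4):
        //   scale_i = 1 + f_attn_temp_scale * log(floor((pos_i + 1) / n_attn_temp_floor_scale) + 1)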
        std::vector<float> attn_scale_data(n_tokens, 0.0f);
        for (int i = 0; i < n_tokens; ++i) {
            const float pos = ubatch->pos[i];
            attn_scale_data[i] = std::log(
                std::floor((pos + 1.0f) / n_attn_temp_floor_scale) + 1.0
            ) * f_attn_temp_scale + 1.0;
        }

        ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*ggml_element_size(attn_scale));
    }
}
void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) {
    if (pos_bucket) {
        const int64_t n_tokens = ubatch->n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer));
        GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing

        int32_t * data = (int32_t *) pos_bucket->data;

        for (int h = 0; h < 1; ++h) {
            for (int j = 0; j < n_tokens; ++j) {
                for (int i = 0; i < n_tokens; ++i) {
                    data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true);
                }
            }
        }
    }
}

void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) {
    if (pos_bucket) {
        kv_state->set_input_pos_bucket(pos_bucket, ubatch);
    }
}
void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
    if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
        //GGML_ASSERT(out_ids && "every model that can must skip unused outputs");

        if (!out_ids) {
            LLAMA_LOG_WARN("%s: 'out_ids' is not created\n", __func__);
        } else {
            const int64_t n_tokens = ubatch->n_tokens;

            GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
            int32_t * data = (int32_t *) out_ids->data;

            if (n_outputs == n_tokens) {
                for (int i = 0; i < n_tokens; ++i) {
                    data[i] = i;
                }
            } else if (ubatch->output) {
                int32_t n_outputs = 0;
                for (int i = 0; i < n_tokens; ++i) {
                    if (ubatch->output[i]) {
                        data[n_outputs++] = i;
                    }
                }
                // the graph needs to have been passed the correct number of outputs
                GGML_ASSERT(this->n_outputs == n_outputs);
            } else if (n_outputs == 1) {
                // only keep last output
                data[0] = n_tokens - 1;
            } else {
                GGML_ASSERT(n_outputs == 0);
            }
        }
    }
}
void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) {
    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs       = ubatch->n_seqs;

        GGML_ASSERT(mean);
        GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));
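        // build an [n_tokens, n_tokens] pooling matrix: row seq_id holds 1/seq_len in every
        // column that belongs to that sequence, so multiplying the (transposed) embeddings
        // by it (see build_pooling, LLAMA_POOLING_TYPE_MEAN) averages each sequence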
        float * data = (float *) mean->data;
        memset(mean->data, 0, n_tokens * n_tokens * ggml_element_size(mean));

        std::vector<uint64_t> sum(n_tokens, 0);

        for (int s = 0; s < n_seqs; ++s) {
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");

            sum[seq_id] += ubatch->n_seq_tokens;
        }

        std::vector<float> div(n_tokens, 0.0f);
        for (int i = 0; i < n_tokens; ++i) {
            const uint64_t s = sum[i];
            if (s > 0) {
                div[i] = 1.0f/float(s);
            }
        }

        for (int s = 0; s < n_seqs; ++s) {
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            for (int i = 0; i < n_seq_tokens; ++i) {
                data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id];
            }
        }
    }
}
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
    if (cparams.embeddings && (
                cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
                cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) {
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs       = ubatch->n_seqs;

        GGML_ASSERT(cls);
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));

        uint32_t * data = (uint32_t *) cls->data;
        memset(cls->data, 0, n_tokens * ggml_element_size(cls));

        for (int s = 0; s < n_seqs; ++s) {
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK");

            for (int i = 0; i < n_seq_tokens; ++i) {
                const llama_pos pos = ubatch->pos[s*n_seq_tokens + i];

                if (pos == 0) {
                    data[seq_id] = s*n_seq_tokens + i;
                }
            }
        }
    }

    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs       = ubatch->n_seqs;

        GGML_ASSERT(cls);
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));

        uint32_t * data = (uint32_t *) cls->data;
        memset(cls->data, 0, n_tokens * ggml_element_size(cls));

        std::vector<int> last_pos(n_tokens, -1);
        std::vector<int> last_row(n_tokens, -1);

        for (int s = 0; s < n_seqs; ++s) {
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST");

            for (int i = 0; i < n_seq_tokens; ++i) {
                const llama_pos pos = ubatch->pos[s*n_seq_tokens + i];

                if (pos >= last_pos[seq_id]) {
                    last_pos[seq_id] = pos;
                    last_row[seq_id] = s*n_seq_tokens + i;
                }
            }
        }

        for (int i = 0; i < n_tokens; ++i) {
            if (last_row[i] >= 0) {
                data[i] = last_row[i];
            }
        }
    }
}
void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

    const int64_t n_kv = kv_state->get_n_kv();

    if (s_copy) {
        GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
        int32_t * data = (int32_t *) s_copy->data;

        // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
        for (uint32_t i = 0; i < n_kv; ++i) {
            data[i] = kv_state->s_copy(i);
        }
    }
}

void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

    const int64_t n_kv = kv_state->get_n_kv();

    if (s_mask) {
        GGML_ASSERT(ggml_backend_buffer_is_host(s_mask->buffer));
        float * data = (float *) s_mask->data;

        // clear unused states
        for (int i = 0; i < n_kv; ++i) {
            data[i] = kv_state->s_mask(i);
        }
    }
}
void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

    if (cross_embd && !cross->v_embd.empty()) {
        assert(cross_embd->type == GGML_TYPE_F32);

        ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd));
    }
}
void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
    if (kq_mask) {
        if (cparams.causal_attn) {
            const int64_t n_kv         = ubatch->n_tokens;
            const int64_t n_tokens     = ubatch->n_tokens;
            const int64_t n_seq_tokens = ubatch->n_seq_tokens;
            const int64_t n_seqs       = ubatch->n_seqs;

            GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));
            float * data = (float *) kq_mask->data;
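            // mask[j, i] = 0.0f (or -|pos_i - pos_j| with ALiBi) when token i is visible to
            // token j, -INFINITY otherwise; visibility requires a shared seq_id and pos_i <= pos_j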
            for (int h = 0; h < 1; ++h) {
                for (int s1 = 0; s1 < n_seqs; ++s1) {
                    const llama_seq_id seq_id = ubatch->seq_id[s1][0];

                    for (int j = 0; j < n_seq_tokens; ++j) {
                        const int32_t tj = s1*n_seq_tokens + j;

                        for (int s0 = 0; s0 < n_seqs; ++s0) {
                            for (int i = 0; i < n_seq_tokens; ++i) {
                                const int32_t ti = s0*n_seq_tokens + i;
                                float f = -INFINITY;

                                for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) {
                                    if (ubatch->seq_id[s0][s] == seq_id && ubatch->pos[ti] <= ubatch->pos[tj]) {
                                        if (hparams.use_alibi) {
                                            f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]);
                                        } else {
                                            f = 0.0f;
                                        }
                                        break;
                                    }
                                }

                                data[h*(n_kv*n_tokens) + tj*n_kv + ti] = f;
                            }
                        }
                    }
                }
            }
        } else {
            const int64_t n_tokens     = ubatch->n_tokens;
            const int64_t n_seq_tokens = ubatch->n_seq_tokens;
            const int64_t n_seqs       = ubatch->n_seqs;
            const int64_t n_stride     = ubatch->n_tokens;

            GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));

            float * data = (float *) kq_mask->data;

            for (int h = 0; h < 1; ++h) {
                for (int s1 = 0; s1 < n_seqs; ++s1) {
                    const llama_seq_id seq_id = ubatch->seq_id[s1][0];

                    for (int j = 0; j < n_seq_tokens; ++j) {
                        const int32_t tj = s1*n_seq_tokens + j;

                        for (int s0 = 0; s0 < n_seqs; ++s0) {
                            for (int i = 0; i < n_seq_tokens; ++i) {
                                const int32_t ti = s0*n_seq_tokens + i;
                                float f = -INFINITY;

                                for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) {
                                    if (ubatch->seq_id[s0][s] == seq_id) {
                                        if (hparams.use_alibi) {
                                            f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]);
                                        } else {
                                            f = 0.0f;
                                        }
                                        break;
                                    }
                                }

                                data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f;
                            }
                        }

                        for (int i = n_tokens; i < n_stride; ++i) {
                            data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY;
                        }
                    }
                }
            }
        }
    }
}
void llm_graph_input_attn_kv_unified::set_input(const llama_ubatch * ubatch) {
    if (self_kq_mask) {
        kv_state->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
    }
}

void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch) {
    if (self_kq_mask) {
        kv_state->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
    }

    if (self_kq_mask_swa) {
        kv_state->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn);
    }
}

void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
    if (cross_kq_mask) {
        const int64_t n_enc    = cross_kq_mask->ne[0];
        const int64_t n_tokens = ubatch->n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer));
        GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing

        float * data = (float *) cross_kq_mask->data;

        for (int h = 0; h < 1; ++h) {
            for (int j = 0; j < n_tokens; ++j) {
                for (int i = 0; i < n_enc; ++i) {
                    float f = -INFINITY;
                    for (int s = 0; s < ubatch->n_seq_id[j]; ++s) {
                        const llama_seq_id seq_id = ubatch->seq_id[j][s];
                        if (cross->seq_ids_enc[i].find(seq_id) != cross->seq_ids_enc[i].end()) {
                            f = 0.0f;
                        }
                    }
                    data[h*(n_enc*n_tokens) + j*n_enc + i] = f;
                }
            }

            for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
                for (int j = 0; j < n_enc; ++j) {
                    data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY;
                }
            }
        }
    }
}
//
// llm_graph_context
//

llm_graph_context::llm_graph_context(const llm_graph_params & params) :
    arch          (params.arch),
    hparams       (params.hparams),
    cparams       (params.cparams),
    ubatch        (params.ubatch),
    n_embd        (hparams.n_embd),
    n_layer       (hparams.n_layer),
    n_rot         (hparams.n_rot),
    n_ctx         (cparams.n_ctx),
    n_head        (hparams.n_head()),
    n_head_kv     (hparams.n_head_kv()),
    n_embd_head_k (hparams.n_embd_head_k),
    n_embd_k_gqa  (hparams.n_embd_k_gqa()),
    n_embd_head_v (hparams.n_embd_head_v),
    n_embd_v_gqa  (hparams.n_embd_v_gqa()),
    n_expert      (hparams.n_expert),
    n_expert_used (cparams.warmup ? hparams.n_expert : hparams.n_expert_used),
    freq_base     (cparams.rope_freq_base),
    freq_scale    (cparams.rope_freq_scale),
    ext_factor    (cparams.yarn_ext_factor),
    attn_factor   (cparams.yarn_attn_factor),
    beta_fast     (cparams.yarn_beta_fast),
    beta_slow     (cparams.yarn_beta_slow),
    norm_eps      (hparams.f_norm_eps),
    norm_rms_eps  (hparams.f_norm_rms_eps),
    n_tokens      (ubatch.n_tokens),
    n_outputs     (params.n_outputs),
    n_ctx_orig    (cparams.n_ctx_orig_yarn),
    pooling_type  (cparams.pooling_type),
    rope_type     (hparams.rope_type),
    ctx0          (params.ctx),
    sched         (params.sched),
    backend_cpu   (params.backend_cpu),
    cvec          (params.cvec),
    loras         (params.loras),
    mstate        (params.mstate),
    cross         (params.cross),
    cb_func       (params.cb),
    res           (std::make_unique<llm_graph_result>()) {
}
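// M-RoPE (multimodal rope, e.g. Qwen2-VL-style models) rotates with 4 position
// components per token; see llm_graph_input_pos::set_input above for how
// text-only batches fill them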
int64_t llm_graph_context::n_pos_per_embd() const {
    return hparams.rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
}
void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
    if (cb_func) {
        cb_func(ubatch, cur, name, il);
    }
}

ggml_tensor * llm_graph_context::build_cvec(
         ggml_tensor * cur,
                 int   il) const {
    return cvec->apply_to(ctx0, cur, il);
}
ggml_tensor * llm_graph_context::build_lora_mm(
          ggml_tensor * w,
          ggml_tensor * cur) const {
    ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
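    // accumulate LoRA deltas: res = W*cur + scale * B*(A*cur), where scale folds in
    // alpha/rank and the per-adapter scale (the same computation is written out
    // inline in build_lora_mm_id below)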
    for (const auto & lora : *loras) {
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
        if (lw == nullptr) {
            continue;
        }

        const float adapter_scale = lora.second;
        const float scale = lw->get_scale(lora.first->alpha, adapter_scale);

        ggml_tensor * ab_cur = ggml_mul_mat(
            ctx0, lw->b,
            ggml_mul_mat(ctx0, lw->a, cur)
        );

        ab_cur = ggml_scale(ctx0, ab_cur, scale);
        res = ggml_add(ctx0, res, ab_cur);
    }

    return res;
}
ggml_tensor * llm_graph_context::build_lora_mm_id(
          ggml_tensor * w,   // ggml_tensor * as
          ggml_tensor * cur, // ggml_tensor * b
          ggml_tensor * ids) const {
    ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);
    for (const auto & lora : *loras) {
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
        if (lw == nullptr) {
            continue;
        }

        const float alpha = lora.first->alpha;
        const float rank  = (float) lw->b->ne[0];
        const float scale = alpha ? lora.second * alpha / rank : lora.second;

        ggml_tensor * ab_cur = ggml_mul_mat_id(
            ctx0, lw->b,
            ggml_mul_mat_id(ctx0, lw->a, cur, ids),
            ids
        );

        ab_cur = ggml_scale(ctx0, ab_cur, scale);
        res = ggml_add(ctx0, res, ab_cur);
    }

    return res;
}
ggml_tensor * llm_graph_context::build_norm(
         ggml_tensor * cur,
         ggml_tensor * mw,
         ggml_tensor * mb,
       llm_norm_type   type,
                 int   il) const {
    switch (type) {
        case LLM_NORM:     cur = ggml_norm    (ctx0, cur, hparams.f_norm_eps);     break;
        case LLM_NORM_RMS: cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps); break;
        case LLM_NORM_GROUP:
            {
                cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]);
                cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps);
                cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[2]);
            } break;
    }

    if (mw || mb) {
        cb(cur, "norm", il);
    }

    if (mw) {
        cur = ggml_mul(ctx0, cur, mw);
        if (mb) {
            cb(cur, "norm_w", il);
        }
    }

    if (mb) {
        cur = ggml_add(ctx0, cur, mb);
    }

    return cur;
}
ggml_tensor * llm_graph_context::build_ffn(
         ggml_tensor * cur,
         ggml_tensor * up,
         ggml_tensor * up_b,
         ggml_tensor * up_s,
         ggml_tensor * gate,
         ggml_tensor * gate_b,
         ggml_tensor * gate_s,
         ggml_tensor * down,
         ggml_tensor * down_b,
         ggml_tensor * down_s,
         ggml_tensor * act_scales,
     llm_ffn_op_type   type_op,
   llm_ffn_gate_type   type_gate,
                 int   il) const {
    ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur;
    cb(tmp, "ffn_up", il);

    if (up_b) {
        tmp = ggml_add(ctx0, tmp, up_b);
        cb(tmp, "ffn_up_b", il);
    }

    if (up_s) {
        tmp = ggml_mul(ctx0, tmp, up_s);
        cb(tmp, "ffn_up_s", il);
    }

    if (gate) {
        switch (type_gate) {
            case LLM_FFN_SEQ:
                {
                    cur = build_lora_mm(gate, tmp);
                    cb(cur, "ffn_gate", il);
                } break;
            case LLM_FFN_PAR:
                {
                    cur = build_lora_mm(gate, cur);
                    cb(cur, "ffn_gate", il);
                } break;
        }

        if (gate_b) {
            cur = ggml_add(ctx0, cur, gate_b);
            cb(cur, "ffn_gate_b", il);
        }

        if (gate_s) {
            cur = ggml_mul(ctx0, cur, gate_s);
            cb(cur, "ffn_gate_s", il);
        }
    } else {
        cur = tmp;
    }

    switch (type_op) {
        case LLM_FFN_SILU:
            {
                cur = ggml_silu(ctx0, cur);
                cb(cur, "ffn_silu", il);
            } break;
        case LLM_FFN_GELU:
            {
                cur = ggml_gelu(ctx0, cur);
                cb(cur, "ffn_gelu", il);
                if (act_scales != NULL) {
                    cur = ggml_div(ctx0, cur, act_scales);
                    cb(cur, "ffn_act", il);
                }
            } break;
        case LLM_FFN_RELU:
            {
                cur = ggml_relu(ctx0, cur);
                cb(cur, "ffn_relu", il);
            } break;
        case LLM_FFN_RELU_SQR:
            {
                cur = ggml_relu(ctx0, cur);
                cb(cur, "ffn_relu", il);

                cur = ggml_sqr(ctx0, cur);
                cb(cur, "ffn_sqr(relu)", il);
            } break;
        case LLM_FFN_SWIGLU:
            {
                // Project to 4h. If using SwiGLU, double the output width; see https://arxiv.org/pdf/2002.05202.pdf
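                // split the projection in half along dim 0 and gate one half with the other:
                // SwiGLU(x) = silu(x0) * x1, where x0 is the first half and x1 the second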
                int64_t split_point = cur->ne[0] / 2;
                ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0));
                ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur)));

                x0 = ggml_silu(ctx0, x0);
                cb(x0, "ffn_silu", il);

                cur = ggml_mul(ctx0, x0, x1);
                cb(cur, "ffn_mul", il);
            } break;
        case LLM_FFN_GEGLU:
            {
                // Split into two equal parts
                int64_t split_point = cur->ne[0] / 2;
                // TODO: these conts should not be needed
                ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0));
                ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur)));

                x0 = ggml_gelu(ctx0, x0);
                cb(x0, "ffn_gelu", il);

                cur = ggml_mul(ctx0, x0, x1);
                cb(cur, "ffn_geglu", il);
            } break;
    }

    if (gate && type_gate == LLM_FFN_PAR) {
        cur = ggml_mul(ctx0, cur, tmp);
        cb(cur, "ffn_gate_par", il);
    }

    if (down) {
        cur = build_lora_mm(down, cur);
        if (arch == LLM_ARCH_GLM4) {
            // GLM4 seems to have numerical issues with half-precision accumulators
            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
        }
    }

    if (down_b) {
        cb(cur, "ffn_down", il);
        cur = ggml_add(ctx0, cur, down_b);
    }

    if (down_s) {
        cur = ggml_mul(ctx0, cur, down_s);
        cb(cur, "ffn_down_s", il);
    }

    return cur;
}
ggml_tensor * llm_graph_context::build_moe_ffn(
         ggml_tensor * cur,
         ggml_tensor * gate_inp,
         ggml_tensor * up_exps,
         ggml_tensor * gate_exps,
         ggml_tensor * down_exps,
         ggml_tensor * exp_probs_b,
             int64_t   n_expert,
             int64_t   n_expert_used,
     llm_ffn_op_type   type_op,
                bool   norm_w,
                bool   scale_w,
               float   w_scale,
        llama_expert_gating_func_type gating_op,
                 int   il) const {
    const int64_t n_embd   = cur->ne[0];
    const int64_t n_tokens = cur->ne[1];
    const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; // for llama4, we apply the sigmoid-ed weights before the FFN

    ggml_tensor * logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens]
    cb(logits, "ffn_moe_logits", il);

    ggml_tensor * probs = nullptr;
    switch (gating_op) {
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX:
            {
                probs = ggml_soft_max(ctx0, logits); // [n_expert, n_tokens]
            } break;
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
            {
                probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens]
            } break;
        default:
            GGML_ABORT("fatal error");
    }
    cb(probs, "ffn_moe_probs", il);

    // add experts selection bias - introduced in DeepSeek V3
    // leave probs unbiased as it's later used to get expert weights
    ggml_tensor * selection_probs = probs;
    if (exp_probs_b != nullptr) {
        selection_probs = ggml_add(ctx0, probs, exp_probs_b);
        cb(selection_probs, "ffn_moe_probs_biased", il);
    }

    // llama4 doesn't have exp_probs_b, and sigmoid is only used after top_k
    // see: https://github.com/meta-llama/llama-models/blob/699a02993512fb36936b1b0741e13c06790bcf98/models/llama4/moe.py#L183-L198
    if (arch == LLM_ARCH_LLAMA4) {
        selection_probs = logits;
    }

    // select experts
    ggml_tensor * selected_experts = ggml_top_k(ctx0, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
    cb(selected_experts->src[0], "ffn_moe_argsort", il);
    cb(selected_experts, "ffn_moe_topk", il);

    ggml_tensor * weights = ggml_get_rows(ctx0,
            ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
    cb(weights, "ffn_moe_weights", il);

    if (norm_w) {
        weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);

        ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens]
        cb(weights_sum, "ffn_moe_weights_sum", il);

        weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens]
        cb(weights, "ffn_moe_weights_norm", il);

        weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
    }
    if (scale_w) {
        weights = ggml_scale(ctx0, weights, w_scale);
        cb(weights, "ffn_moe_weights_scaled", il);
    }

    cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens);
    if (weight_before_ffn) {
        // repeat cur to [n_embd, n_expert_used, n_tokens]
        ggml_tensor * repeated = ggml_repeat_4d(ctx0, cur, n_embd, n_expert_used, n_tokens, 1);
        cur = ggml_mul(ctx0, repeated, weights);
        cb(cur, "ffn_moe_weighted", il);
    }

    ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
    cb(up, "ffn_moe_up", il);

    ggml_tensor * experts = nullptr;
    if (gate_exps) {
        cur = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
        cb(cur, "ffn_moe_gate", il);
    } else {
        cur = up;
    }

    switch (type_op) {
        case LLM_FFN_SILU:
            {
                cur = ggml_silu(ctx0, cur);
                cb(cur, "ffn_moe_silu", il);
            } break;
        case LLM_FFN_GELU:
            {
                cur = ggml_gelu(ctx0, cur);
                cb(cur, "ffn_moe_gelu", il);
            } break;
        default:
            GGML_ABORT("fatal error");
    }

    if (gate_exps) {
        cur = ggml_mul(ctx0, cur, up); // [n_ff, n_expert_used, n_tokens]
        cb(cur, "ffn_moe_gate_par", il);
    }

    experts = build_lora_mm_id(down_exps, cur, selected_experts); // [n_embd, n_expert_used, n_tokens]
    cb(experts, "ffn_moe_down", il);

    if (!weight_before_ffn) {
        experts = ggml_mul(ctx0, experts, weights);
        cb(experts, "ffn_moe_weighted", il);
    }
    // aggregate experts
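    // each expert output is an [n_embd, n_tokens] slice of the [n_embd, n_expert_used, n_tokens]
    // tensor; summing the slices yields the routed mixture, since the routing weights
    // have already been multiplied in above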
    ggml_tensor * moe_out = nullptr;
    for (int i = 0; i < n_expert_used; ++i) {
        ggml_tensor * cur_expert = ggml_view_2d(ctx0, experts, n_embd, n_tokens,
                experts->nb[2], i*experts->nb[1]);

        if (i == 0) {
            moe_out = cur_expert;
        } else {
            moe_out = ggml_add(ctx0, moe_out, cur_expert);
        }
    }

    if (n_expert_used == 1) {
        // avoid returning a non-contiguous tensor
        moe_out = ggml_cont(ctx0, moe_out);
    }

    cb(moe_out, "ffn_moe_out", il);

    return moe_out;
}
// input embeddings with optional lora
ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
    const int64_t n_embd = hparams.n_embd;

    auto inp = std::make_unique<llm_graph_input_embd>();

    ggml_tensor * cur = nullptr;

    if (ubatch.token) {
        inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens);
        //cb(inp->tokens, "inp_tokens", -1);
        ggml_set_input(inp->tokens);
        res->t_tokens = inp->tokens;

        cur = ggml_get_rows(ctx0, tok_embd, inp->tokens);

        // apply lora for embedding tokens if needed
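        // gather only the A rows for the tokens in the batch, then project through B;
        // this adds scale * B*(A[tokens]) on top of the base embedding lookup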
        for (const auto & lora : *loras) {
            llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd);
            if (lw == nullptr) {
                continue;
            }

            const float adapter_scale = lora.second;
            const float scale = lw->get_scale(lora.first->alpha, adapter_scale);

            ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat(
                ctx0, lw->b, // non-transposed lora_b
                ggml_get_rows(ctx0, lw->a, inp->tokens)
            ), scale);

            cur = ggml_add(ctx0, cur, inpL_delta);
        }
    } else {
        inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
        ggml_set_input(inp->embd);

        cur = inp->embd;
    }

    // For Granite architecture
    if (hparams.f_embedding_scale != 0.0f) {
        cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale);
    }

    cb(cur, "inp_embd", -1);

    res->add_input(std::move(inp));

    return cur;
}
ggml_tensor * llm_graph_context::build_inp_pos() const {
    auto inp = std::make_unique<llm_graph_input_pos>(n_pos_per_embd());

    auto & cur = inp->pos;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens*n_pos_per_embd());
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_attn_scale() const {
    auto inp = std::make_unique<llm_graph_input_attn_temp>(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale);

    auto & cur = inp->attn_scale;

    // this needs to be 1x1xN for broadcasting
    cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 1, 1, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_out_ids() const {
    auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);

    auto & cur = inp->out_ids;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_mean() const {
    auto inp = std::make_unique<llm_graph_input_mean>(cparams);

    auto & cur = inp->mean;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_cls() const {
    auto inp = std::make_unique<llm_graph_input_cls>(cparams);

    auto & cur = inp->cls;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}
ggml_tensor * llm_graph_context::build_inp_s_copy() const {
    const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);

    auto inp = std::make_unique<llm_graph_input_s_copy>(kv_state);

    const auto n_kv = kv_state->get_n_kv();

    auto & cur = inp->s_copy;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_s_mask() const {
    const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);

    auto inp = std::make_unique<llm_graph_input_s_mask>(kv_state);

    const auto n_kv = kv_state->get_n_kv();

    auto & cur = inp->s_mask;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}
ggml_tensor * llm_graph_context::build_inp_cross_embd() const {
    auto inp = std::make_unique<llm_graph_input_cross_embd>(cross);

    auto & cur = inp->cross_embd;

    // if we have the output embeddings from the encoder, use them directly
    // TODO: needs more work to be correct, for now just use the tensor shape
    //if (cross->t_embd) {
    //    cur = ggml_view_tensor(ctx0, cross->t_embd);
    //    return cur;
    //}

    const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd;
    const auto n_enc  = !cross->v_embd.empty() ? cross->n_enc  : hparams.n_ctx_train;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const {
    auto inp = std::make_unique<llm_graph_input_pos_bucket>(hparams);

    auto & cur = inp->pos_bucket;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const {
    const auto * kv_state = static_cast<const llama_kv_cache_unified_state *>(mstate);

    auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, kv_state);

    const auto n_kv = kv_state->get_n_kv();

    auto & cur = inp->pos_bucket;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const {
    ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]);
    cb(pos_bucket_1d, "pos_bucket_1d", -1);

    ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d);

    pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]);
    pos_bias = ggml_permute   (ctx0, pos_bias, 2, 0, 1, 3);
    pos_bias = ggml_cont      (ctx0, pos_bias);

    cb(pos_bias, "pos_bias", -1);

    return pos_bias;
}
ggml_tensor * llm_graph_context::build_attn_mha(
         ggml_cgraph * gf,
         ggml_tensor * q,
         ggml_tensor * k,
         ggml_tensor * v,
         ggml_tensor * kq_b,
         ggml_tensor * kq_mask,
         ggml_tensor * v_mla,
               float   kq_scale) const {
    const bool v_trans = v->nb[1] > v->nb[2];
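    // v_trans detects the transposed V layout used by the KV cache when flash attention
    // is disabled: in that case the stride over ne[1] exceeds the stride over ne[2]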
    q = ggml_permute(ctx0, q, 0, 2, 1, 3);
    k = ggml_permute(ctx0, k, 0, 2, 1, 3);
    v = ggml_permute(ctx0, v, 0, 2, 1, 3);

    const auto n_tokens = q->ne[1];
    const auto n_head   = q->ne[2];
    const auto n_kv     = k->ne[1];

    ggml_tensor * cur;

    // TODO: replace hardcoded padding with ggml-provided padding
    if (cparams.flash_attn && (n_kv % 256 == 0) && kq_b == nullptr) {
        GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet");

        if (v_trans) {
            v = ggml_transpose(ctx0, v);
        }

        // this can happen when KV cache is not used (e.g. an embedding model with non-causal attn)
        if (k->type == GGML_TYPE_F32) {
            k = ggml_cast(ctx0, k, GGML_TYPE_F16);
        }

        if (v->type == GGML_TYPE_F32) {
            v = ggml_cast(ctx0, v, GGML_TYPE_F16);
        }

        cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias,
                                  hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);

        ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);

        if (v_mla) {
#if 0
            // v_mla can be applied as a matrix-vector multiplication with broadcasting across dimension 3 == n_tokens.
            // However, the code is optimized for dimensions 0 and 1 being large, so this is inefficient.
            cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens);
            cur = ggml_mul_mat(ctx0, v_mla, cur);
#else
            // It's preferable to do the calculation as a matrix-matrix multiplication with n_tokens in dimension 1.
            // The permutations are noops and only change how the tensor data is interpreted.
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_mul_mat(ctx0, v_mla, cur);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_cont(ctx0, cur); // Needed because ggml_reshape_2d expects contiguous inputs.
#endif
        }

        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
    } else {
        ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);

        // note: this op tends to require high floating point range
        //       while for some models F16 is enough, for others it is not, so we default to F32 here
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

        if (arch == LLM_ARCH_GROK) {
            // need to do the following:
            // multiply by attn_output_multiplier of 0.08838834764831845
            // and then:
            // kq = 30 * tanh(kq / 30)
            // before the softmax below
            kq = ggml_tanh (ctx0, ggml_scale(ctx0, kq, 0.08838834764831845f/30.0f));
            kq = ggml_scale(ctx0, kq, 30);
        }

        if (hparams.attn_soft_cap) {
            kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping);
            kq = ggml_tanh (ctx0, kq);
            kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
        }

        if (kq_b) {
            kq = ggml_add(ctx0, kq, kq_b);
        }

        kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);

        if (!v_trans) {
            // note: avoid this branch
            v = ggml_cont(ctx0, ggml_transpose(ctx0, v));
        }

        ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);

        // for MLA with the absorption optimization, we need to "decompress" from MQA back to MHA
        if (v_mla) {
            kqv = ggml_mul_mat(ctx0, v_mla, kqv);
        }

        cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);

        cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);

        if (!cparams.offload_kqv) {
            // all nodes between the KV store and the attention output are run on the CPU
            ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu);
        }
    }

    ggml_build_forward_expand(gf, cur);

    return cur;
}
llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const {
    auto inp = std::make_unique<llm_graph_input_attn_no_cache>(hparams, cparams);

    // note: there is no KV cache, so the number of KV values is equal to the number of tokens in the batch
    inp->kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
    //cb(inp_kq_mask, "KQ_mask", -1);
    ggml_set_input(inp->kq_mask);

    inp->kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->kq_mask, GGML_TYPE_F16) : inp->kq_mask;

    return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_no_cache * inp,
        ggml_cgraph * gf,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    GGML_UNUSED(n_tokens);

    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto & kq_mask = inp->get_kq_mask();

    ggml_tensor * q = q_cur;
    ggml_tensor * k = k_cur;
    ggml_tensor * v = v_cur;

    ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        //cb(cur, "kqv_wo", il);
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const {
    const auto * kv_state = static_cast<const llama_kv_cache_unified_state *>(mstate);

    auto inp = std::make_unique<llm_graph_input_attn_kv_unified>(hparams, cparams, kv_state);

    {
        GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified_iswa for SWA");

        const auto n_kv = kv_state->get_n_kv();

        inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
        //cb(inp->self_kq_mask, "KQ_mask", -1);
        ggml_set_input(inp->self_kq_mask);

        inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
    }

    return (llm_graph_input_attn_kv_unified *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_kv_unified * inp,
        ggml_cgraph * gf,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto * kv_state = static_cast<const llama_kv_cache_unified_state *>(mstate);

    // store to KV cache
    {
        ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il));
        ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il));
    }

    const auto & kq_mask = inp->get_kq_mask();

    ggml_tensor * q = q_cur;
    ggml_tensor * k = kv_state->get_k(ctx0, il);
    ggml_tensor * v = kv_state->get_v(ctx0, il);

    ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
        if (arch == LLM_ARCH_GLM4) {
            // GLM4 seems to have numerical issues with half-precision accumulators
            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
        }
    }

    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const {
    const auto * kv_state = static_cast<const llama_kv_cache_unified_iswa_state *>(mstate);

    auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, kv_state);

    {
        const auto n_kv = kv_state->get_base()->get_n_kv();

        inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
        //cb(inp->self_kq_mask, "KQ_mask", -1);
        ggml_set_input(inp->self_kq_mask);

        inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
    }

    {
        GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA");

        const auto n_kv = kv_state->get_swa()->get_n_kv();

        inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
        //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1);
        ggml_set_input(inp->self_kq_mask_swa);

        inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
    }

    return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_kv_unified_iswa * inp,
        ggml_cgraph * gf,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto * kv_state_iswa = static_cast<const llama_kv_cache_unified_iswa_state *>(mstate);

    const bool is_swa = hparams.is_swa(il);

    const auto * kv_state = is_swa ? kv_state_iswa->get_swa() : kv_state_iswa->get_base();

    // store to KV cache
    {
        ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il));
        ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il));
    }

    const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask();

    ggml_tensor * q = q_cur;
    ggml_tensor * k = kv_state->get_k(ctx0, il);
    ggml_tensor * v = kv_state->get_v(ctx0, il);

    ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        //cb(cur, "kqv_wo", il);
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const {
    auto inp = std::make_unique<llm_graph_input_attn_cross>(cross);

    const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;

    inp->cross_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
    ggml_set_input(inp->cross_kq_mask);

    inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask;

    return (llm_graph_input_attn_cross *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_cross * inp,
        ggml_cgraph * gf,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto & kq_mask = inp->get_kq_mask_cross();

    ggml_tensor * q = q_cur;
    ggml_tensor * k = k_cur;
    ggml_tensor * v = v_cur;

    ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        //cb(cur, "kqv_wo", il);
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
ggml_tensor * llm_graph_context::build_copy_mask_state(
         ggml_cgraph * gf,
         ggml_tensor * s,
         ggml_tensor * state_copy,
         ggml_tensor * state_mask,
             int32_t   n_state,
             int32_t   n_seqs) const {
    const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);

    const auto n_kv    = kv_state->get_n_kv();
    const auto kv_head = kv_state->get_head();

    ggml_tensor * states = ggml_reshape_2d(ctx0, s, n_state, kv_state->get_size());

    // copy states
    // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv
    // this shrinks the tensor's ne[1] to n_kv
    states = ggml_get_rows(ctx0, states, state_copy);

    // clear states of sequences which are starting at the beginning of this batch
    // FIXME: zero-out NANs?
    states = ggml_mul(ctx0, states, state_mask);

    // copy states which won't be changed further (between n_seqs and n_kv)
    ggml_build_forward_expand(gf,
        ggml_cpy(ctx0,
            ggml_view_1d(ctx0, states, n_state*(n_kv - n_seqs), (n_seqs          )*n_state*ggml_element_size(states)),
            ggml_view_1d(ctx0, s,      n_state*(n_kv - n_seqs), (kv_head + n_seqs)*n_state*ggml_element_size(s))));

    // the part of the states that will be used and modified
    return ggml_view_2d(ctx0, states, n_state, n_seqs, states->nb[1], 0);
}
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
         ggml_cgraph * gf,
         ggml_tensor * state_copy,
         ggml_tensor * state_mask,
  const llama_ubatch & ubatch,
                 int   il) const {
    const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);

    const auto token_shift_count = hparams.token_shift_count;

    const int64_t n_seqs = ubatch.n_seqs;
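    // for recurrent (RWKV-style) layers, the K cache of layer il holds the token-shift
    // state (token_shift_count embeddings per sequence) rather than attention keys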
    ggml_tensor * token_shift_all = kv_state->get_k_l(il);

    ggml_tensor * token_shift = build_copy_mask_state(
            gf, token_shift_all, state_copy, state_mask,
            hparams.n_embd_k_s(), n_seqs);

    token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);

    return token_shift;
}
ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
         ggml_tensor * token_shift,
  const llama_ubatch & ubatch,
                 int   il) const {
    const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);

    const auto token_shift_count = hparams.token_shift_count;
    const auto n_embd = hparams.n_embd;

    const int64_t n_seqs = ubatch.n_seqs;

    const auto kv_head = kv_state->get_head();

    return ggml_cpy(
        ctx0,
        ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0),
        ggml_view_1d(ctx0, kv_state->get_k_l(il), hparams.n_embd_k_s()*n_seqs, hparams.n_embd_k_s()*kv_head*ggml_element_size(kv_state->get_k_l(il)))
    );
}
void llm_graph_context::build_pooling(
        ggml_cgraph * gf,
        ggml_tensor * cls,
        ggml_tensor * cls_b,
        ggml_tensor * cls_out,
        ggml_tensor * cls_out_b) const {
    if (!cparams.embeddings) {
        return;
    }

    ggml_tensor * inp = res->t_embd;

    //// find result_norm tensor for input
    //for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
    //    inp = ggml_graph_node(gf, i);
    //    if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) {
    //        break;
    //    }
    //    inp = nullptr;
    //}

    GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor");

    ggml_tensor * cur;

    switch (pooling_type) {
        case LLAMA_POOLING_TYPE_NONE:
            {
                cur = inp;
            } break;
        case LLAMA_POOLING_TYPE_MEAN:
            {
                ggml_tensor * inp_mean = build_inp_mean();
                cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean);
            } break;
        case LLAMA_POOLING_TYPE_CLS:
        case LLAMA_POOLING_TYPE_LAST:
            {
                ggml_tensor * inp_cls = build_inp_cls();
                cur = ggml_get_rows(ctx0, inp, inp_cls);
            } break;
        case LLAMA_POOLING_TYPE_RANK:
            {
                ggml_tensor * inp_cls = build_inp_cls();
                inp = ggml_get_rows(ctx0, inp, inp_cls);

                if (cls != nullptr && cls_b != nullptr) {
                    // classification head
                    // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566
                    cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls, inp), cls_b);
                    cur = ggml_tanh(ctx0, cur);

                    // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
                    // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896
                    if (cls_out) {
                        GGML_ASSERT(cls_out_b != nullptr);
                        cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls_out, cur), cls_out_b);
                    }
                } else if (cls_out) {
                    // Single layer classification head (direct projection)
                    // https://github.com/huggingface/transformers/blob/f4fc42216cd56ab6b68270bf80d811614d8d59e4/src/transformers/models/bert/modeling_bert.py#L1476
                    GGML_ASSERT(cls_out_b != nullptr);
                    cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls_out, inp), cls_out_b);
                } else {
                    GGML_ABORT("RANK pooling requires either cls+cls_b or cls_out+cls_out_b");
                }
            } break;
        default:
            {
                GGML_ABORT("unknown pooling type");
            }
    }

    cb(cur, "result_embd_pooled", -1);
    res->t_embd_pooled = cur;

    ggml_build_forward_expand(gf, cur);
}
int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) {
    // TODO move to hparams if a T5 variant appears that uses a different value
    const int64_t max_distance = 128;
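    // T5-style relative position bucketing: half of the buckets cover exact offsets in
    // [0, max_exact), the other half covers offsets up to max_distance on a log scale;
    // with bidirectional attention, the bucket range is split between the two signs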
    if (bidirectional) {
        n_buckets >>= 1;
    }

    const int64_t max_exact = n_buckets >> 1;

    int32_t relative_position = x - y;
    int32_t relative_bucket = 0;

    if (bidirectional) {
        relative_bucket += (relative_position > 0) * n_buckets;
        relative_position = abs(relative_position);
    } else {
        relative_position = -std::min<int32_t>(relative_position, 0);
    }

    int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
    relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1);
    relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);

    return relative_bucket;
}