// llm_build_minicpm3.cpp

#include "../llama-model.h"
#include "../llama-graph.h"
#include "llm_build_minicpm3.h"

#include <cmath>

llm_build_minicpm3::llm_build_minicpm3(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    // TODO: if the model varies, these parameters need to be read from the model
    const int64_t n_embd_base = 256;
    const float   scale_embd  = 12.0f;
    const float   scale_depth = 1.4f;
    const float   kq_scale    = 1.0f / sqrtf(float(hparams.n_embd_head_k));
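    // these constants appear to follow the muP-style ("tensor program") scaling
    // described for MiniCPM: embeddings are scaled up on input, residual branches
    // are damped with depth, and attention uses the usual 1/sqrt(n_embd_head_k)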

    const uint32_t n_embd_head_qk_rope = hparams.n_rot;
    const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
    const uint32_t kv_lora_rank        = hparams.n_lora_kv;
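
    // MiniCPM3 uses multi-head latent attention (MLA) in the style of DeepSeek-V2:
    // Q and KV pass through low-rank bottlenecks, and each head's query/key is
    // split into a rope'd part (n_rot dims) and a position-free "nope" part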

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // scale the input embeddings
    inpL = ggml_scale(ctx0, inpL, scale_embd);
    cb(inpL, "inp_scaled", -1);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn = build_attn_inp_kv();

    ggml_tensor * inp_out_ids = build_inp_out_ids();
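    // inp_out_ids indexes the tokens whose outputs are actually needed; it is
    // used on the last layer to drop all other rows before the output head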

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
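        // rope_factors holds optional per-layer RoPE frequency scaling factors
        // for long-context extension; it may be NULL when the model provides none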

        // norm
        cur = build_norm(inpL,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            ggml_tensor * q = NULL;
            // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}
            q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
            cb(q, "q", il);

            q = build_norm(q,
                    model.layers[il].attn_q_a_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(q, "q", il);

            // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}
            q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
            cb(q, "q", il);
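            // q was produced through a low-rank bottleneck: wq_a projects down to
            // q_lora_rank, an RMS norm is applied, then wq_b expands back out to
            // n_head * n_embd_head_k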

            // split into {n_embd_head_qk_nope, n_head, n_tokens}
            ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
                    ggml_row_size(q->type, hparams.n_embd_head_k),
                    ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
                    0);
            cb(q_nope, "q_nope", il);

            // and {n_embd_head_qk_rope, n_head, n_tokens}
            ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
                    ggml_row_size(q->type, hparams.n_embd_head_k),
                    ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
                    ggml_row_size(q->type, n_embd_head_qk_nope));
            cb(q_pe, "q_pe", il);

            // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
            ggml_tensor * kv_pe_compressed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
            cb(kv_pe_compressed, "kv_pe_compressed", il);

            // split into {kv_lora_rank, n_tokens}
            ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compressed, kv_lora_rank, n_tokens,
                    kv_pe_compressed->nb[1],
                    0);
            cb(kv_compressed, "kv_compressed", il);

            // and {n_embd_head_qk_rope, 1, n_tokens}
            ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compressed, n_embd_head_qk_rope, 1, n_tokens,
                    kv_pe_compressed->nb[1],
                    kv_pe_compressed->nb[1],
                    ggml_row_size(kv_pe_compressed->type, kv_lora_rank));
            cb(k_pe, "k_pe", il);
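            // note the layout: the first kv_lora_rank rows of each column hold the
            // compressed KV latent, and the trailing n_embd_head_qk_rope rows hold a
            // single positional key (ne1 == 1, one "head"), as the wkv_a_mqa name suggests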

            kv_compressed = build_norm(kv_compressed,
                    model.layers[il].attn_kv_a_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(kv_compressed, "kv_compressed", il);

            // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
            ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
            cb(kv, "kv", il);

            // split into {n_embd_head_qk_nope, n_head, n_tokens}
            ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
                    ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
                    ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
                    0);
            cb(k_nope, "k_nope", il);

            // and {n_embd_head_v, n_head, n_tokens}
            ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
                    ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
                    ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
                    ggml_row_size(kv->type, (n_embd_head_qk_nope)));
            cb(v_states, "v_states", il);

            v_states = ggml_cont(ctx0, v_states);
            cb(v_states, "v_states", il);
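            // v_states is a strided, non-contiguous view into kv, so it is made
            // contiguous here before the attention kernel consumes it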

            q_pe = ggml_rope_ext(
                    ctx0, q_pe, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(q_pe, "q_pe", il);

            // shared RoPE key
            k_pe = ggml_rope_ext(
                    ctx0, k_pe, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(k_pe, "k_pe", il);
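
            // only the rope'd slices carry positional information; q_nope/k_nope stay
            // position-free, per MLA's decoupled RoPE. ggml_repeat below broadcasts the
            // single shared k_pe head across all n_head heads (q_pe supplies the target
            // shape) before it is concatenated with the per-head k_nope.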
            ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
            cb(q_states, "q_states", il);

            ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
            cb(k_states, "k_states", il);

            cur = build_attn(inp_attn,
                    model.layers[il].wo, NULL,
                    q_states, k_states, v_states, nullptr, nullptr, nullptr, kq_scale, il);
        }

        if (il == n_layer - 1 && inp_out_ids) {
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        // scale_res - scale the hidden states for residual connection
        const float scale_res = scale_depth/sqrtf(float(n_layer)); // TODO: is this correct?
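        // (the MiniCPM report describes scaling each residual branch's output by
        // scale_depth/sqrt(num_layers), which is what this computes)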
        cur = ggml_scale(ctx0, cur, scale_res);
        cb(cur, "hidden_scaled", il);

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // feed-forward network
        {
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);

            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        }
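        // the FFN above is the standard SwiGLU MLP, down(silu(gate(x)) * up(x)):
        // LLM_FFN_SILU selects the activation, LLM_FFN_PAR the parallel gate/up form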

        // scale the hidden states for residual connection
        cur = ggml_scale(ctx0, cur, scale_res);
        cb(cur, "hidden_scaled_ffn", il);

        cur = ggml_add(ctx0, cur, ffn_inp);
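
        // apply any loaded per-layer control vector (identity when none is set)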
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM_RMS, -1);
    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head scaling
    const float scale_lmhead = float(n_embd_base)/float(n_embd);
    cur = ggml_scale(ctx0, cur, scale_lmhead);
    cb(cur, "lmhead_scaling", -1);
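    // (scaling by n_embd_base/n_embd appears to match the muP-style logit scaling
    // described for MiniCPM, undoing width-dependent growth before the projection)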

    // lm_head
    cur = build_lora_mm(model.output, cur);
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}