minicpm3.cpp

#include "models.h"

llm_build_minicpm3::llm_build_minicpm3(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    //TODO: if the model varies, these parameters need to be read from the model
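    // NOTE: the hard-coded values below presumably mirror the MiniCPM3 checkpoint's
    // scale_emb, scale_depth and dim_model_base hyperparameters: the token embeddings,
    // the per-layer residual contributions and the lm_head logits are all rescaled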
    const int64_t n_embd_base = 256;
    const float   scale_embd  = 12.0f;
    const float   scale_depth = 1.4f;
    const float   kq_scale    = 1.0f / sqrtf(float(hparams.n_embd_head_k));
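
    // per-head dimensions of the MLA-style attention: each head has a RoPE'd part
    // (n_embd_head_qk_rope) and a non-rotated part (n_embd_head_qk_nope); K/V go
    // through a low-rank latent of width kv_lora_rank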
    const uint32_t n_embd_head_qk_rope = hparams.n_rot;
    const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
    const uint32_t kv_lora_rank = hparams.n_lora_kv;

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // scale the input embeddings
    inpL = ggml_scale(ctx0, inpL, scale_embd);
    cb(inpL, "inp_scaled", -1);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn = build_attn_inp_kv();
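
    // indices of the tokens for which logits will be computed (used on the last layer)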
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

        // norm
        cur = build_norm(inpL,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self_attention
        {
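            // low-rank Q path: project the input down to q_lora_rank, RMS-norm it, then
            // project back up to n_head * n_embd_head_k; the result is split below into a
            // non-positional part (q_nope) and a RoPE'd part (q_pe) per head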
            ggml_tensor * q = NULL;
            // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}
            q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
            cb(q, "q", il);

            q = build_norm(q,
                    model.layers[il].attn_q_a_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(q, "q", il);

            // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}
            q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
            cb(q, "q", il);

            // split into {n_head * n_embd_head_qk_nope, n_tokens}
            ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
                    ggml_row_size(q->type, hparams.n_embd_head_k),
                    ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
                    0);
            cb(q_nope, "q_nope", il);

            // and {n_head * n_embd_head_qk_rope, n_tokens}
            ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
                    ggml_row_size(q->type, hparams.n_embd_head_k),
                    ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
                    ggml_row_size(q->type, n_embd_head_qk_nope));
            cb(q_pe, "q_pe", il);

            // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
            ggml_tensor * kv_pe_compressed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
            cb(kv_pe_compressed, "kv_pe_compressed", il);

            // split into {kv_lora_rank, n_tokens}
            ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compressed, kv_lora_rank, n_tokens,
                    kv_pe_compressed->nb[1],
                    0);
            cb(kv_compressed, "kv_compressed", il);

            // and {n_embd_head_qk_rope, n_tokens}
            ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compressed, n_embd_head_qk_rope, 1, n_tokens,
                    kv_pe_compressed->nb[1],
                    kv_pe_compressed->nb[1],
                    ggml_row_size(kv_pe_compressed->type, kv_lora_rank));
            cb(k_pe, "k_pe", il);
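
            // note: k_pe has a single head shared by all query heads (MQA-style);
            // it is broadcast to n_head further below via ggml_repeat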

            kv_compressed = build_norm(kv_compressed,
                    model.layers[il].attn_kv_a_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(kv_compressed, "kv_compressed", il);

            // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
            ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
            cb(kv, "kv", il);

            // split into {n_head * n_embd_head_qk_nope, n_tokens}
            ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
                    ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
                    ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
                    0);
            cb(k_nope, "k_nope", il);

            // and {n_head * n_embd_head_v, n_tokens}
            ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
                    ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
                    ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
                    ggml_row_size(kv->type, (n_embd_head_qk_nope)));
            cb(v_states, "v_states", il);

            v_states = ggml_cont(ctx0, v_states);
            cb(v_states, "v_states", il);
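
            // v_states above is a strided view into kv; ggml_cont copies it into a
            // contiguous tensor (presumably needed by the attention op below)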

            q_pe = ggml_rope_ext(
                    ctx0, q_pe, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );
            cb(q_pe, "q_pe", il);

            // shared RoPE key
            k_pe = ggml_rope_ext(
                    ctx0, k_pe, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );
            cb(k_pe, "k_pe", il);

            ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
            cb(q_states, "q_states", il);
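
            // full key: the per-head non-positional part is concatenated with the shared
            // RoPE'd part, which is repeated across heads to match the shape of q_pe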
            ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
            cb(k_states, "k_states", il);

            cur = build_attn(inp_attn,
                    model.layers[il].wo, NULL,
                    q_states, k_states, v_states, nullptr, nullptr, nullptr, kq_scale, il);
        }
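
        // on the last layer, keep only the rows for which outputs were requested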
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        // scale_res - scale the hidden states for residual connection
        const float scale_res = scale_depth/sqrtf(float(n_layer)); // TODO: is this correct?
        cur = ggml_scale(ctx0, cur, scale_res);
        cb(cur, "hidden_scaled", il);

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // feed-forward network
        {
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        }

        // scale the hidden states for residual connection
        cur = ggml_scale(ctx0, cur, scale_res);
        cb(cur, "hidden_scaled_ffn", il);

        cur = ggml_add(ctx0, cur, ffn_inp);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head scaling
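    // the hidden states are scaled by n_embd_base / n_embd before the output projection
    // (presumably the width correction that pairs with the embedding scaling above)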
    const float scale_lmhead = float(n_embd_base)/float(n_embd);
    cur = ggml_scale(ctx0, cur, scale_lmhead);
    cb(cur, "lmhead_scaling", -1);

    // lm_head
    cur = build_lora_mm(model.output, cur);
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}