llama-hparams.cpp

#include "llama-hparams.h"

#include "ggml.h"

// number of attention (query) heads for layer il
uint32_t llama_hparams::n_head(uint32_t il) const {
    if (il < n_layer) {
        return n_head_arr[il];
    }

    GGML_ABORT("fatal error");
}

// number of key/value heads for layer il
uint32_t llama_hparams::n_head_kv(uint32_t il) const {
    if (il < n_layer) {
        return n_head_kv_arr[il];
    }

    GGML_ABORT("fatal error");
}

// feed-forward hidden size for layer il
uint32_t llama_hparams::n_ff(uint32_t il) const {
    if (il < n_layer) {
        return n_ff_arr[il];
    }

    GGML_ABORT("fatal error");
}

// grouped-query attention factor: query heads per key/value head
uint32_t llama_hparams::n_gqa(uint32_t il) const {
    const uint32_t n_head    = this->n_head(il);
    const uint32_t n_head_kv = this->n_head_kv(il);

    if (n_head_kv == 0) {
        return 0;
    }

    return n_head/n_head_kv;
}

// dimension of key embeddings across all k-v heads
uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_k * n_head_kv;
}

// dimension of value embeddings across all k-v heads
uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_v * n_head_kv;
}

// dimension of the rolling state embeddings (per recurrent layer)
uint32_t llama_hparams::n_embd_k_s() const {
    if (wkv_head_size != 0) {
        // for RWKV models
        return token_shift_count * n_embd;
    }

    // TODO: maybe support other convolution strides than 1
    // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
    return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
}

// dimension of the recurrent state embeddings (per recurrent layer)
uint32_t llama_hparams::n_embd_v_s() const {
    if (wkv_head_size != 0) {
        // corresponds to RWKV's wkv_states size
        return n_embd * wkv_head_size;
    }

    // corresponds to Mamba's ssm_states size
    return ssm_d_state * ssm_d_inner;
}