llama-hparams.cpp

#include "llama-hparams.h"

#include "ggml.h"

#include <algorithm>

void llama_hparams::set_swa_pattern(uint32_t n_pattern, bool dense_first) {
    if (dense_first) {
        // layers at multiples of n_pattern are dense, the rest use SWA
        for (uint32_t il = 0; il < n_layer; ++il) {
            swa_layers[il] = n_pattern == 0 || (il % n_pattern != 0);
        }
    } else {
        // the last layer in each group of n_pattern layers is dense, the rest use SWA
        for (uint32_t il = 0; il < n_layer; ++il) {
            swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
        }
    }
}
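// Illustration (not part of the original file): the layer pattern produced above for a
// hypothetical n_pattern = 3, derived directly from the modulo logic:
//
//   dense_first == true:   il = 0: dense, 1: SWA, 2: SWA, 3: dense, 4: SWA, 5: SWA, ...
//   dense_first == false:  il = 0: SWA, 1: SWA, 2: dense, 3: SWA, 4: SWA, 5: dense, ...
//
// Edge cases as written: n_pattern == 0 marks every layer as SWA, and n_pattern == 1
// marks every layer as dense (il % 1 is always 0).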
bool llama_hparams::is_swa_any() const {
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (swa_layers[il]) {
            return true;
        }
    }

    return false;
}
uint32_t llama_hparams::n_head(uint32_t il) const {
    if (il < n_layer) {
        return n_head_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_head_kv(uint32_t il) const {
    if (il < n_layer) {
        return n_head_kv_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_ff(uint32_t il) const {
    if (il < n_layer) {
        return n_ff_arr[il];
    }

    GGML_ABORT("fatal error");
}
uint32_t llama_hparams::n_gqa(uint32_t il) const {
    const uint32_t n_head    = this->n_head(il);
    const uint32_t n_head_kv = this->n_head_kv(il);

    if (n_head_kv == 0) {
        return 0;
    }

    return n_head/n_head_kv;
}
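// Worked example (hypothetical head counts, not taken from any specific model): with
// n_head(il) == 32 and n_head_kv(il) == 8, n_gqa(il) == 32/8 == 4, i.e. four query heads
// share each KV head. A zero KV-head count (e.g. a layer with no attention) short-circuits
// to 0 instead of dividing by zero.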
uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_k * n_head_kv;
}

uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_v * n_head_kv;
}
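// Illustration (dimensions chosen only for concreteness): these are the per-token K and V
// row widths for layer il. With n_embd_head_k == n_embd_head_v == 128 and
// n_head_kv(il) == 8, both n_embd_k_gqa(il) and n_embd_v_gqa(il) are 1024 elements per token.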
bool llama_hparams::is_n_embd_k_gqa_variable() const {
    const uint32_t val = n_embd_k_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (val != n_embd_k_gqa(il)) {
            return true;
        }
    }

    return false;
}

bool llama_hparams::is_n_embd_v_gqa_variable() const {
    const uint32_t val = n_embd_v_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (val != n_embd_v_gqa(il)) {
            return true;
        }
    }

    return false;
}

uint32_t llama_hparams::n_embd_k_gqa_max() const {
    uint32_t val = n_embd_k_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        val = std::max(val, n_embd_k_gqa(il));
    }

    return val;
}

uint32_t llama_hparams::n_embd_v_gqa_max() const {
    uint32_t val = n_embd_v_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        val = std::max(val, n_embd_v_gqa(il));
    }

    return val;
}
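// Note on the four helpers above: when n_head_kv_arr differs across layers, the per-layer
// K/V row widths differ as well; is_n_embd_{k,v}_gqa_variable() detects that case, and
// n_embd_{k,v}_gqa_max() returns a width large enough for every layer.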
uint32_t llama_hparams::n_embd_r() const {
    if (wkv_head_size != 0) {
        // for RWKV models
        return token_shift_count * n_embd;
    }

    if (n_shortconv_l_cache != 0) {
        // for LFM2 models
        return n_embd * (n_shortconv_l_cache - 1);
    }

    // TODO: maybe support other convolution strides than 1
    // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
    // Corresponds to Mamba's conv_states size
    return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * (ssm_d_inner + 2*ssm_n_group*ssm_d_state);
}
uint32_t llama_hparams::n_embd_s() const {
    if (wkv_head_size != 0) {
        // corresponds to RWKV's wkv_states size
        return n_embd * wkv_head_size;
    }

    // corresponds to Mamba's ssm_states size
    return ssm_d_state * ssm_d_inner;
}
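// Worked example (hypothetical Mamba-style dimensions, chosen only to illustrate the two
// formulas above): with ssm_d_conv = 4, ssm_d_inner = 1536, ssm_n_group = 1 and
// ssm_d_state = 16, n_embd_r() = (4 - 1) * (1536 + 2*1*16) = 4704 elements of rolling
// conv state per layer, and n_embd_s() = 16 * 1536 = 24576 elements of SSM state per layer.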
bool llama_hparams::is_recurrent(uint32_t il) const {
    return recurrent_layer_arr[il];
}

uint32_t llama_hparams::n_pos_per_embd() const {
    return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
}

bool llama_hparams::is_swa(uint32_t il) const {
    if (il < n_layer) {
        return swa_layers[il];
    }

    GGML_ABORT("fatal error");
}