llama-hparams.h

#pragma once

#include "llama.h"

#include <array>

// bump if necessary
#define LLAMA_MAX_LAYERS  512
#define LLAMA_MAX_EXPERTS 384 // Kimi-K2

enum llama_expert_gating_func_type {
    LLAMA_EXPERT_GATING_FUNC_TYPE_NONE           = 0,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX        = 1,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID        = 2,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT = 3, // applied to the router weights instead of the logits
};

enum llama_swa_type {
    LLAMA_SWA_TYPE_NONE      = 0,
    LLAMA_SWA_TYPE_STANDARD  = 1,
    LLAMA_SWA_TYPE_CHUNKED   = 2,
    LLAMA_SWA_TYPE_SYMMETRIC = 3,
};

struct llama_hparams_posnet {
    uint32_t n_embd;
    uint32_t n_layer;
};

struct llama_hparams_convnext {
    uint32_t n_embd;
    uint32_t n_layer;
};

struct llama_hparams {
    bool vocab_only;
    bool rope_finetuned;
    bool use_par_res;
    bool swin_norm;

    uint32_t n_ctx_train; // context size the model was trained on
    uint32_t n_embd;
    uint32_t n_embd_features = 0;
    uint32_t n_layer;
    int32_t  n_layer_kv_from_start = -1; // if non-negative, the first n_layer_kv_from_start layers have KV cache
    uint32_t n_rot;
    uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
    uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
    uint32_t n_expert = 0;
    uint32_t n_expert_used = 0;
    uint32_t n_rel_attn_bkts = 0;

    // note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
    uint32_t n_embd_head_k_mla = 0;
    uint32_t n_embd_head_v_mla = 0;

    // for WavTokenizer
    struct llama_hparams_posnet   posnet;
    struct llama_hparams_convnext convnext;

    uint32_t n_shortconv_l_cache = 0;

    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
    std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;

    uint32_t n_layer_dense_lead = 0;
    uint32_t n_lora_q = 0;
    uint32_t n_lora_kv = 0;
    uint32_t n_ff_exp = 0;
    uint32_t n_ff_shexp = 0;
    uint32_t n_expert_shared = 0;
    uint32_t n_norm_groups = 0;

    float    expert_weights_scale = 0.0;
    bool     expert_weights_norm = false;
    uint32_t expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
    uint32_t moe_every_n_layers = 0;
    uint32_t nextn_predict_layers = 0;

    float f_norm_eps;
    float f_norm_rms_eps;
    float f_norm_group_eps;

    float f_attn_logit_softcapping   = 50.0f;
    float f_router_logit_softcapping = 30.0f;
    float f_final_logit_softcapping  = 30.0f;

    // for RWKV
    uint32_t rescale_every_n_layers = 0;
    uint32_t time_mix_extra_dim = 0;
    uint32_t time_decay_extra_dim = 0;
    uint32_t wkv_head_size = 0;
    uint32_t token_shift_count = 2;
    uint32_t n_lora_decay = 0;
    uint32_t n_lora_iclr = 0;
    uint32_t n_lora_value_res_mix = 0;
    uint32_t n_lora_gate = 0;

    float rope_attn_factor = 1.0f;
    float rope_freq_base_train;
    float rope_freq_base_train_swa;
    float rope_freq_scale_train;
    float rope_freq_scale_train_swa;

    uint32_t n_ctx_orig_yarn;
    float    rope_yarn_log_mul = 0.0f;

    float yarn_ext_factor = -1.0f;
    float yarn_attn_factor = 1.0f;
    float yarn_beta_fast = 32.0f;
    float yarn_beta_slow = 1.0f;

    std::array<int, 4> rope_sections;

    // Sliding Window Attention (SWA)
    llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;

    // the size of the sliding window (0 - no SWA)
    uint32_t n_swa = 0;

    // if swa_layers[il] == true, then layer il is SWA
    // if swa_layers[il] == false, then layer il is dense (i.e. non-SWA)
    // by default, all layers are dense
    std::array<bool, LLAMA_MAX_LAYERS> swa_layers;

    // for State Space Models
    uint32_t ssm_d_conv = 0;
    uint32_t ssm_d_inner = 0;
    uint32_t ssm_d_state = 0;
    uint32_t ssm_dt_rank = 0;
    uint32_t ssm_n_group = 0;

    // for hybrid state space models
    std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;

    bool ssm_dt_b_c_rms = false;

    float f_clamp_kqv = 0.0f;
    float f_max_alibi_bias = 0.0f;
    float f_logit_scale = 0.0f;

    // Additional scale factors (Granite/Granite MoE)
    float f_residual_scale = 0.0f;
    float f_embedding_scale = 0.0f;
    float f_attention_scale = 0.0f;

    // grok-2
    float    f_attn_out_scale = 0.0f;
    uint32_t attn_temp_length = 0;

    bool causal_attn = true;
    bool use_alibi = false;
    bool attn_soft_cap = false;
    bool use_kq_norm = false;

    // for Classifiers
    uint32_t n_cls_out = 1;

    // llama4 smallthinker
    uint32_t n_moe_layer_step = 0;
    uint32_t n_no_rope_layer_step = 4;
    uint32_t n_attn_temp_floor_scale = 8192;
    float    f_attn_temp_scale = 0.1;

    // gemma3n altup
    uint32_t n_altup = 4;     // altup_num_inputs
    uint32_t i_altup_act = 0; // altup_active_idx
    uint32_t laurel_rank = 64;
    uint32_t n_embd_altup = 256;

    // needed by encoder-decoder models (e.g. T5, FLAN-T5)
    // ref: https://github.com/ggerganov/llama.cpp/pull/8141
    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
    uint32_t    dec_n_layer = 0;

    enum llama_pooling_type      pooling_type            = LLAMA_POOLING_TYPE_NONE;
    enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
    enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;

    // n_pattern means that every n_pattern-th layer is dense (i.e. non-SWA)
    // dense_first means whether the pattern starts with a dense layer
    // note that if n_pattern == 0, all layers are SWA
    //           if n_pattern == 1, all layers are dense
    // example 1: n_pattern = 3, dense_first = false
    //   il == 0: swa
    //   il == 1: swa
    //   il == 2: dense
    //   il == 3: swa
    //   il == 4: swa
    //   il == 5: dense
    //   il == 6: swa
    //   etc ...
    // example 2: n_pattern = 2, dense_first = true
    //   il == 0: dense
    //   il == 1: swa
    //   il == 2: dense
    //   il == 3: swa
    //   etc ...
    void set_swa_pattern(uint32_t n_pattern, bool dense_first = false);
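
    // a minimal sketch of how such a pattern could be materialized into swa_layers,
    // consistent with the examples above (illustrative only, not necessarily the
    // actual definition in the implementation file):
    //
    //     void llama_hparams::set_swa_pattern(uint32_t n_pattern, bool dense_first) {
    //         if (dense_first) {
    //             for (uint32_t il = 0; il < n_layer; ++il) {
    //                 swa_layers[il] = n_pattern == 0 || (il % n_pattern != 0);
    //             }
    //         } else {
    //             for (uint32_t il = 0; il < n_layer; ++il) {
    //                 swa_layers[il] = n_pattern == 0 || (il % n_pattern < n_pattern - 1);
    //             }
    //         }
    //     }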

    // return true if one of the layers is SWA
    bool is_swa_any() const;

    uint32_t n_head(uint32_t il = 0) const;
    uint32_t n_head_kv(uint32_t il = 0) const;
    uint32_t n_ff(uint32_t il = 0) const;
    uint32_t n_gqa(uint32_t il = 0) const;

    // dimension of key embeddings across all k-v heads
    uint32_t n_embd_k_gqa(uint32_t il = 0) const;

    // dimension of value embeddings across all k-v heads
    uint32_t n_embd_v_gqa(uint32_t il = 0) const;
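
    // a minimal sketch of the per-layer accessors above, assuming the conventional
    // GQA layout where each of the n_head_kv(il) k-v heads has n_embd_head_k key
    // and n_embd_head_v value dimensions (illustrative only; the MLA head sizes
    // are a separate concern):
    //
    //     uint32_t llama_hparams::n_head   (uint32_t il) const { return n_head_arr   [il]; }
    //     uint32_t llama_hparams::n_head_kv(uint32_t il) const { return n_head_kv_arr[il]; }
    //
    //     uint32_t llama_hparams::n_gqa(uint32_t il) const {
    //         const uint32_t n_head_kv = this->n_head_kv(il);
    //         return n_head_kv == 0 ? 0 : this->n_head(il)/n_head_kv;
    //     }
    //
    //     uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const { return n_embd_head_k*n_head_kv(il); }
    //     uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const { return n_embd_head_v*n_head_kv(il); }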

    // true if any layer has a different n_embd_k_gqa/n_embd_v_gqa
    bool is_n_embd_k_gqa_variable() const;
    bool is_n_embd_v_gqa_variable() const;

    // return the maximum n_embd_k_gqa/n_embd_v_gqa across all layers
    uint32_t n_embd_k_gqa_max() const;
    uint32_t n_embd_v_gqa_max() const;

    // dimension of the rolling state embeddings
    // corresponds to Mamba's conv_states size or RWKV's token_shift states size
    uint32_t n_embd_r() const;

    // dimension of the recurrent state embeddings
    uint32_t n_embd_s() const;
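
    // a rough sketch of how these per-sequence state sizes relate to the SSM/RWKV
    // fields above (illustrative only, ignoring model-specific special cases):
    //
    //     // Mamba: last (ssm_d_conv - 1) columns of the conv state per inner dim;
    //     // RWKV:  token_shift_count shifted token embeddings
    //     uint32_t n_embd_r() const {
    //         return wkv_head_size != 0 ? token_shift_count*n_embd
    //                                   : (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0)*ssm_d_inner;
    //     }
    //
    //     // Mamba: ssm_states of size ssm_d_state x ssm_d_inner; RWKV: wkv_states
    //     uint32_t n_embd_s() const {
    //         return wkv_head_size != 0 ? n_embd*wkv_head_size : ssm_d_state*ssm_d_inner;
    //     }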

    // whether or not the given layer is recurrent (for hybrid models)
    bool is_recurrent(uint32_t il) const;

    uint32_t n_pos_per_embd() const;

    bool is_swa(uint32_t il) const;
    bool has_kv(uint32_t il) const;

    // number of layers for which has_kv() returns true
    uint32_t n_layer_kv() const;

    // note that this function uses different SWA parameters from those in the hparams
    // TODO: think of a better place for this function
    // TODO: pack the SWA params in a struct?
    static bool is_masked_swa(uint32_t n_swa, llama_swa_type swa_type, llama_pos p0, llama_pos p1);
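
    // a minimal sketch of what is_masked_swa could compute per llama_swa_type,
    // assuming p1 is the position of the query token, p0 the position it attends to,
    // and the return value means "p0 falls outside the window of p1 and must be masked"
    // (illustrative only, not the authoritative definition):
    //
    //     static bool is_masked_swa(uint32_t n_swa, llama_swa_type swa_type, llama_pos p0, llama_pos p1) {
    //         switch (swa_type) {
    //             case LLAMA_SWA_TYPE_NONE:      return false;
    //             case LLAMA_SWA_TYPE_STANDARD:  return p1 - p0 >= (llama_pos) n_swa;
    //             case LLAMA_SWA_TYPE_CHUNKED:   return p0 < (p1/(llama_pos) n_swa)*(llama_pos) n_swa;
    //             case LLAMA_SWA_TYPE_SYMMETRIC: return std::abs(p1 - p0) > (llama_pos) n_swa/2;
    //         }
    //         return false;
    //     }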
};

static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
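
// example (hypothetical calling code): sizing a per-layer K cache from the accessors,
// skipping layers that have no KV cache; hparams and n_ctx are assumed to exist in
// the caller's scope:
//
//     size_t k_cache_cells = 0;
//     for (uint32_t il = 0; il < hparams.n_layer; ++il) {
//         if (!hparams.has_kv(il)) {
//             continue;
//         }
//         k_cache_cells += (size_t) hparams.n_embd_k_gqa(il) * n_ctx;
//     }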