- #include "llama-hparams.h"
- #include "ggml.h"
void llama_hparams::set_swa_pattern(uint32_t n_pattern) {
    for (uint32_t il = 0; il < n_layer; ++il) {
        swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
    }
}

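// true if at least one layer uses sliding-window attention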
bool llama_hparams::is_swa_any() const {
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (swa_layers[il]) {
            return true;
        }
    }

    return false;
}

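// number of attention heads for layer il (aborts if il is out of range)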
uint32_t llama_hparams::n_head(uint32_t il) const {
    if (il < n_layer) {
        return n_head_arr[il];
    }

    GGML_ABORT("fatal error");
}

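// number of KV heads for layer il (aborts if il is out of range)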
uint32_t llama_hparams::n_head_kv(uint32_t il) const {
    if (il < n_layer) {
        return n_head_kv_arr[il];
    }

    GGML_ABORT("fatal error");
}

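// feed-forward size for layer il (aborts if il is out of range)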
uint32_t llama_hparams::n_ff(uint32_t il) const {
    if (il < n_layer) {
        return n_ff_arr[il];
    }

    GGML_ABORT("fatal error");
}

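// grouped-query attention factor: attention heads per KV head for layer il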
uint32_t llama_hparams::n_gqa(uint32_t il) const {
    const uint32_t n_head    = this->n_head(il);
    const uint32_t n_head_kv = this->n_head_kv(il);

    if (n_head_kv == 0) {
        return 0;
    }

    return n_head/n_head_kv;
}

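// dimension of the key embeddings across all KV heads for layer il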
uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_k * n_head_kv;
}

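// dimension of the value embeddings across all KV heads for layer il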
uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_v * n_head_kv;
}

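// true if the per-layer K embedding size differs between layers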
bool llama_hparams::is_n_embd_k_gqa_variable() const {
    const uint32_t val = n_embd_k_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (val != n_embd_k_gqa(il)) {
            return true;
        }
    }

    return false;
}

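// true if the per-layer V embedding size differs between layers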
bool llama_hparams::is_n_embd_v_gqa_variable() const {
    const uint32_t val = n_embd_v_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (val != n_embd_v_gqa(il)) {
            return true;
        }
    }

    return false;
}

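// maximum K embedding size across all layers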
uint32_t llama_hparams::n_embd_k_gqa_max() const {
    uint32_t val = n_embd_k_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        val = std::max(val, n_embd_k_gqa(il));
    }

    return val;
}

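// maximum V embedding size across all layers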
uint32_t llama_hparams::n_embd_v_gqa_max() const {
    uint32_t val = n_embd_v_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        val = std::max(val, n_embd_v_gqa(il));
    }

    return val;
}

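// size of the per-sequence rolling/conv state (RWKV token shift, LFM2 short conv, Mamba conv_states)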
uint32_t llama_hparams::n_embd_r() const {
    if (wkv_head_size != 0) {
        // for RWKV models
        return token_shift_count * n_embd;
    }

    if (n_shortconv_l_cache != 0) {
        // for LFM2 models
        return n_embd * (n_shortconv_l_cache - 1);
    }

    // TODO: maybe support other convolution strides than 1
    // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
    // Corresponds to Mamba's conv_states size
    return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * (ssm_d_inner + 2*ssm_n_group*ssm_d_state);
}

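// size of the per-sequence recurrent state (RWKV wkv_states or Mamba ssm_states)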
uint32_t llama_hparams::n_embd_s() const {
    if (wkv_head_size != 0) {
        // corresponds to RWKV's wkv_states size
        return n_embd * wkv_head_size;
    }

    // corresponds to Mamba's ssm_states size
    return ssm_d_state * ssm_d_inner;
}

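// whether layer il is recurrent (e.g. a Mamba/RWKV-style layer)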
bool llama_hparams::is_recurrent(uint32_t il) const {
    return recurrent_layer_arr[il];
}

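// number of position values per token: 4 when using M-RoPE, 1 otherwise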
uint32_t llama_hparams::n_pos_per_embd() const {
    return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
}

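// whether layer il uses sliding-window attention (aborts if il is out of range)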
bool llama_hparams::is_swa(uint32_t il) const {
    if (il < n_layer) {
        return swa_layers[il];
    }

    GGML_ABORT("fatal error");
}