
llama : use Q4_K for attn_v for Q2_K_S when n_gqa >= 4 (#4996)

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Kawrakow, 2 years ago
commit 2b3a665d39
1 changed file with 6 additions and 1 deletion:
  1. llama.cpp (+6, -1)

llama.cpp  (+6, -1)

@@ -8477,7 +8477,12 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
         }
         else if (name == "token_embd.weight") new_type = GGML_TYPE_Q2_K;
     } else if (name.find("attn_v.weight") != std::string::npos) {
-        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q4_K;
+        }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
             new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
         }
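
For context, `hparams.n_gqa()` in llama.cpp is the grouped-query-attention factor, i.e. the ratio of attention heads to KV heads (`n_head / n_head_kv`); MHA models give 1, while a 70B-style Llama with 64 heads and 8 KV heads gives 8. Because GQA shrinks the V projection relative to the rest of the model, promoting `attn_v.weight` to Q4_K costs little extra size. The sketch below illustrates the selection rule the diff adds; the helper name and enum are hypothetical, not the actual `get_k_quant_type` signature.

    // Minimal sketch (hypothetical helper, not the llama.cpp API) of the rule
    // this diff adds for attn_v.weight at the Q2_K / Q2_K_S ftypes.
    #include <cstdint>
    #include <cstdio>

    enum class wtype { Q3_K, Q4_K };

    // n_gqa is the grouped-query-attention factor: n_head / n_head_kv.
    static wtype attn_v_type_q2k(uint32_t n_head, uint32_t n_head_kv) {
        const uint32_t n_gqa = n_head / n_head_kv;
        // With n_gqa >= 4 the V projection is small relative to the whole
        // model, so spending Q4_K on it is cheap and helps quality.
        return n_gqa >= 4 ? wtype::Q4_K : wtype::Q3_K;
    }

    int main() {
        // 7B-style model: 32 heads, 32 KV heads -> n_gqa = 1 -> Q3_K
        printf("7B : %s\n", attn_v_type_q2k(32, 32) == wtype::Q4_K ? "Q4_K" : "Q3_K");
        // 70B-style model: 64 heads, 8 KV heads -> n_gqa = 8 -> Q4_K
        printf("70B: %s\n", attn_v_type_q2k(64, 8) == wtype::Q4_K ? "Q4_K" : "Q3_K");
        return 0;
    }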