
llama : fix non-quantization of expert gating tensors (#5754)

This reverts a single line from #5475
compilade 1 year ago
Commit adcb12a9ba
1 changed file with 2 additions and 1 deletion

+ 2 - 1
llama.cpp

@@ -11162,7 +11162,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;
 
         // do not quantize expert gating tensors
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
 
         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD,    "weight");
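The NOTE in the patch is the key detail: when the layer number is not known, a name built from the LLM_TN template still contains its layer-index placeholder, so an exact string comparison against a concrete tensor name can never match, and the expert gating tensors were quantized despite the check. Below is a minimal, self-contained sketch of that failure mode, assuming a per-layer name template of the form "blk.%d.ffn_gate_inp" (inferred from the substring used in the fix); it is illustrative, not the actual llama.cpp code.

// sketch only: shows why the exact comparison never matched,
// and why the substring check from #5754 works for every layer
#include <cassert>
#include <string>

int main() {
    // concrete tensor name as seen during quantization (layer 0)
    const std::string name = "blk.0.ffn_gate_inp.weight";

    // name built without a known layer number keeps the format placeholder
    // (assumed template, matching the substring used in the fix)
    const std::string placeholder = "blk.%d.ffn_gate_inp.weight";

    // the reverted check: always true, so the tensor was not skipped
    bool quantize_old = true;
    quantize_old &= name != placeholder;
    assert(quantize_old);

    // the fixed check: substring match excludes the tensor in any layer
    bool quantize_new = true;
    quantize_new &= name.find("ffn_gate_inp.weight") == std::string::npos;
    assert(!quantize_new);

    return 0;
}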