Sfoglia il codice sorgente

model: add llama 4 scaling for mistral-large (deepseek arch) (#17744)

Xuan-Son Nguyen 1 mese fa
parent
commit
4d3726278b
Ha cambiato 2 file con 22 aggiunte e 0 eliminazioni
  1. 4 0
      src/llama-model.cpp
  2. 18 0
      src/models/deepseek2.cpp

+ 4 - 0
src/llama-model.cpp

@@ -1628,6 +1628,10 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 }
                 ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, false);
 
 
+                // (optional) temperature tuning - used by mistral-large
+                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE,  hparams.f_attn_temp_scale,       false);
+                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH, hparams.n_attn_temp_floor_scale, false);
+
                 switch (hparams.n_layer) {
                     case 27: type = LLM_TYPE_16B; break;
                     case 60: type = LLM_TYPE_236B; break;

+ 18 - 0
src/models/deepseek2.cpp

@@ -30,6 +30,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
     // {n_embd, n_tokens}
     inpL = build_inp_embd(model.tok_embd);
 
 
+    // (optional) temperature tuning - used by mistral-large
+    ggml_tensor * inp_attn_scale = nullptr;
+    if (hparams.f_attn_temp_scale != 0.0f) {
+        inp_attn_scale = build_inp_attn_scale();
+    }
+
     // inp_pos - contains the positions
     ggml_tensor * inp_pos = build_inp_pos();
 
 
@@ -128,6 +134,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
                 ggml_tensor * Vcur = kv_cmpr;
                 cb(Vcur, "Vcur", il);
 
 
+                if (inp_attn_scale) {
+                    // apply llama 4 temperature scaling
+                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
+                    cb(Qcur, "Qcur_attn_temp_scaled", il);
+                }
+
                 // note: MLA with the absorption optimzation converts into MQA (ie: GQA with 1 group)
                 cur = build_attn(inp_attn,
                         model.layers[il].wo, NULL,
@@ -160,6 +172,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
                 ggml_tensor * Kcur = ggml_concat(ctx0, ggml_repeat(ctx0, k_pe, q_pe), k_nope, 0);
                 cb(Kcur, "Kcur", il);
 
 
+                if (inp_attn_scale) {
+                    // apply llama 4 temperature scaling
+                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
+                    cb(Qcur, "Qcur_attn_temp_scaled", il);
+                }
+
                 // note: MLA without the absorption optimization converts into MHA (ie: GQA with full n_head groups)
                 cur = build_attn(inp_attn,
                             model.layers[il].wo, NULL,