@@ -30,6 +30,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
// {n_embd, n_tokens}
inpL = build_inp_embd(model.tok_embd);
 
+ // (optional) temperature tuning - used by mistral-large
+ ggml_tensor * inp_attn_scale = nullptr;
+ if (hparams.f_attn_temp_scale != 0.0f) {
+ inp_attn_scale = build_inp_attn_scale();
+ }
+
// inp_pos - contains the positions
ggml_tensor * inp_pos = build_inp_pos();
 
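For reference, `build_inp_attn_scale()` feeds the graph one scale value per token position. A minimal host-side sketch of what that input is expected to hold, assuming the Llama 4 style formula that the comments in the hunks below reference (the hparam name `n_attn_temp_floor_scale` and the example values are assumptions carried over from llama.cpp's Llama 4 path):

    // sketch: per-token attention temperature scale (Llama 4 style, assumed)
    // scale(pos) = 1 + f_attn_temp_scale * log(1 + floor((pos + 1) / n_attn_temp_floor_scale))
    #include <cmath>
    #include <cstdint>
    #include <vector>

    static std::vector<float> attn_temp_scales(
            const std::vector<int32_t> & pos,    // token positions in the batch
            float    f_attn_temp_scale,          // e.g. 0.1f
            uint32_t n_attn_temp_floor_scale) {  // e.g. 8192
        std::vector<float> scales(pos.size());
        for (size_t i = 0; i < pos.size(); ++i) {
            scales[i] = std::log(std::floor((pos[i] + 1.0f) / n_attn_temp_floor_scale) + 1.0f)
                      * f_attn_temp_scale + 1.0f;
        }
        return scales;
    }
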
@@ -128,6 +134,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
ggml_tensor * Vcur = kv_cmpr;
cb(Vcur, "Vcur", il);
 
+ if (inp_attn_scale) {
+ // apply llama 4 temperature scaling
+ Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
+ cb(Qcur, "Qcur_attn_temp_scaled", il);
+ }
+
// note: MLA with the absorption optimization converts into MQA (ie: GQA with 1 group)
cur = build_attn(inp_attn,
model.layers[il].wo, NULL,
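The multiply above relies on ggml's implicit broadcasting: `inp_attn_scale` is a tensor whose last dimension is `n_tokens` with singleton leading dimensions (shape assumed to be `{1, 1, n_tokens}`), so `ggml_mul` repeats each token's scale across the head dimension and across all heads of `Qcur`. Since the attention logits are linear in Q, scaling the query is equivalent to scaling that token's logit row before the softmax, i.e. a per-position softmax temperature:

    // softmax((s*q) . k / sqrt(d)) == softmax(s * (q . k / sqrt(d)))  for every key k
    // -> a scale s > 1 acts as a temperature 1/s, sharpening the distribution
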
@@ -160,6 +172,12 @@ llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_gr
ggml_tensor * Kcur = ggml_concat(ctx0, ggml_repeat(ctx0, k_pe, q_pe), k_nope, 0);
cb(Kcur, "Kcur", il);
 
+ if (inp_attn_scale) {
+ // apply llama 4 temperature scaling
+ Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
+ cb(Qcur, "Qcur_attn_temp_scaled", il);
+ }
+
// note: MLA without the absorption optimization converts into MHA (ie: GQA with full n_head groups)
cur = build_attn(inp_attn,
model.layers[il].wo, NULL,
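The same guard and multiply appear in both MLA branches, so the scaling behaves identically with and without the absorption optimization. Models whose hparams do not set a temperature scale are assumed to load with `f_attn_temp_scale == 0.0f`, in which case the guard skips the extra node and the graph is unchanged. Under the example values assumed earlier (`f_attn_temp_scale = 0.1`, floor scale 8192), the multiplier is exactly 1.0 for positions below the floor scale and grows slowly afterwards, e.g. `1 + 0.1 * log(1 + floor(100001 / 8192)) = 1 + 0.1 * log(13) ≈ 1.256` at position 100000.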