@@ -685,7 +685,30 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             } break;
         case LLM_ARCH_GROK:
             {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                // defaults for old GGUFs
+                hparams.yarn_beta_fast = 8.0f;
+                hparams.f_logit_scale = 0.5773502691896257f;
+                hparams.f_embedding_scale = 78.38367176906169f;
+                hparams.f_attn_out_scale = 0.08838834764831845f;
+                hparams.f_attn_logit_softcapping = 30.0f;
+                hparams.f_router_logit_softcapping = 30.0f;
+                // no final_logit_softcapping in grok-1
+                hparams.f_final_logit_softcapping = 0.0f;
+
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
+                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale, false);
+                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale, false);
+                ml.get_key(LLM_KV_ATTENTION_OUTPUT_SCALE, hparams.f_attn_out_scale, false);
+                ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false);
+                ml.get_key(LLM_KV_ROUTER_LOGIT_SOFTCAPPING, hparams.f_router_logit_softcapping, false);
+                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
+
+                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH, hparams.attn_temp_length, false);
+                ml.get_key(LLM_KV_ROPE_SCALING_YARN_EXT_FACTOR, hparams.yarn_ext_factor, false);
+                ml.get_key(LLM_KV_ROPE_SCALING_YARN_ATTN_FACTOR, hparams.yarn_attn_factor, false);
+                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_FAST, hparams.yarn_beta_fast, false);
+                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_SLOW, hparams.yarn_beta_slow, false);

                 switch (hparams.n_layer) {
                     case 64: type = LLM_TYPE_314B; break;
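A note on the hard-coded defaults above (my reading, not stated in the patch): they look like they are derived from Grok-1's dimensions, i.e. sqrt(n_embd) for the embedding scale, 1/sqrt(3) for the logit scale and 1/sqrt(head_dim) for the attention output scale, and since every ml.get_key() call passes false for its required argument, these defaults only stick when the corresponding key is missing from an old GGUF. A standalone sanity check, assuming Grok-1 uses n_embd = 6144 and a head size of 128:

#include <cmath>
#include <cstdio>

int main() {
    const double n_embd   = 6144; // assumed Grok-1 hidden size
    const double head_dim = 128;  // assumed Grok-1 attention head size

    std::printf("f_embedding_scale %.17g\n", std::sqrt(n_embd));         // ~78.38367176906169
    std::printf("f_logit_scale     %.17g\n", 1.0 / std::sqrt(3.0));      // ~0.5773502691896257
    std::printf("f_attn_out_scale  %.17g\n", 1.0 / std::sqrt(head_dim)); // ~0.08838834764831845
    return 0;
}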
@@ -2540,6 +2563,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                 }

+                const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff/* / n_expert_used*/; // grok-1 n_ff_exp == n_ff
                 for (int i = 0; i < n_layer; ++i) {
                     auto & layer = layers[i];

@@ -2554,12 +2578,19 @@ bool llama_model::load_tensors(llama_model_loader & ml) {

                     layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

+                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
+                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, TENSOR_NOT_REQUIRED);
+                    layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
+
                     layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
-                    layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED);
-                    layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
-                    layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
+                    layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, TENSOR_NOT_REQUIRED);
+                    layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
+                    layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);

-                    layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
+                    layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
+                    if (!layer.ffn_post_norm) {
+                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
+                    }
                 }
             } break;
         case LLM_ARCH_DBRX:
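The ffn_post_norm lookup above tries the legacy layer_out_norm tensor name first as optional and only requires the new ffn_post_norm name when that fails, so GGUFs from before and after the rename both keep loading; n_ff_exp likewise falls back to n_ff, since grok-1 conversions carry no separate expert feed-forward length. A generic sketch of the same fallback idiom, written against a plain name-to-tensor map rather than llama.cpp's create_tensor/tn helpers:

#include <map>
#include <stdexcept>
#include <string>

struct tensor_stub { }; // stand-in for ggml_tensor

// Look a tensor up under its legacy name first, then under the new name;
// fail only if neither exists (mirrors the TENSOR_NOT_REQUIRED-then-required
// pattern above, but against a plain map).
tensor_stub * find_with_fallback(
        const std::map<std::string, tensor_stub *> & tensors,
        const std::string & legacy_name,
        const std::string & new_name) {
    if (auto it = tensors.find(legacy_name); it != tensors.end()) {
        return it->second;
    }
    if (auto it = tensors.find(new_name); it != tensors.end()) {
        return it->second;
    }
    throw std::runtime_error("missing tensor: " + new_name);
}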
@@ -7028,9 +7059,6 @@ struct llm_build_grok : public llm_graph_context {

         inpL = build_inp_embd(model.tok_embd);

-        // multiply by embedding_multiplier_scale of 78.38367176906169
-        inpL = ggml_scale(ctx0, inpL, 78.38367176906169f);
-
         // inp_pos - contains the positions
         ggml_tensor * inp_pos = build_inp_pos();

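Dropping the hard-coded multiplier here does not drop the scaling itself: the same value now lives in hparams.f_embedding_scale (defaulted above for old GGUFs), and the graph presumably applies it wherever the embedding scaling happens now, which is outside this excerpt. In plain C++ terms, the removed line amounted to:

#include <vector>

// Scale every element of the token-embedding activations by the embedding
// scale (sqrt(n_embd), ~78.38 for grok-1) -- what the removed ggml_scale did,
// minus the ggml graph machinery.
void scale_embeddings(std::vector<float> & inp_embd, float embedding_scale) {
    for (float & x : inp_embd) {
        x *= embedding_scale;
    }
}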
@@ -7102,26 +7130,22 @@ struct llm_build_grok : public llm_graph_context {
                 inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
             }

-            // Grok
-            // if attn_out_norm is present then apply it before adding the input
-            if (model.layers[il].attn_out_norm) {
-                cur = build_norm(cur,
-                        model.layers[il].attn_out_norm, NULL,
-                        LLM_NORM_RMS, il);
-                cb(cur, "attn_out_norm", il);
-            }
+            cur = build_norm(cur,
+                    model.layers[il].attn_out_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "attn_out_norm", il);

             ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
             cb(ffn_inp, "ffn_inp", il);

             // feed-forward network
-            // MoE branch
             cur = build_norm(ffn_inp,
                     model.layers[il].ffn_norm, NULL,
                     LLM_NORM_RMS, il);
             cb(cur, "ffn_norm", il);

-            cur = build_moe_ffn(cur,
+            // MoE branch
+            ggml_tensor * moe_out = build_moe_ffn(cur,
                     model.layers[il].ffn_gate_inp,
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
@@ -7132,18 +7156,28 @@ struct llm_build_grok : public llm_graph_context {
                     false, 0.0,
                     LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     il);
-            cb(cur, "ffn_moe_out", il);
+            cb(moe_out, "ffn_moe_out", il);

-            // Grok
-            // if layer_out_norm is present then apply it before adding the input
-            // Idea: maybe ffn_out_norm is a better name
-            if (model.layers[il].layer_out_norm) {
-                cur = build_norm(cur,
-                        model.layers[il].layer_out_norm, NULL,
-                        LLM_NORM_RMS, il);
-                cb(cur, "layer_out_norm", il);
+            if (model.layers[il].ffn_up) {
+                ggml_tensor * ffn_out = build_ffn(cur,
+                        model.layers[il].ffn_up, NULL, NULL,
+                        model.layers[il].ffn_gate, NULL, NULL,
+                        model.layers[il].ffn_down, NULL, NULL,
+                        NULL,
+                        LLM_FFN_GELU, LLM_FFN_PAR, il);
+                cb(ffn_out, "ffn_out", il);
+
+                cur = ggml_scale(ctx0, ggml_add(ctx0, ffn_out, moe_out), std::sqrt(2) / 2);
+                cb(cur, "ffn_out", il);
+            } else {
+                cur = moe_out;
             }

+            cur = build_norm(cur,
+                    model.layers[il].ffn_post_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "ffn_post_norm", il);
+
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

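Layers that ship a dense FFN next to the experts take the new branch above: both outputs are added and rescaled by std::sqrt(2) / 2, i.e. divided by sqrt(2); grok-1 checkpoints are pure MoE and have no dense ffn_up tensor, so they stay on the else path and keep the plain MoE output. My reading of the scale factor is that it keeps the combined activation at roughly the magnitude of a single branch; a scalar sketch of the arithmetic:

#include <cmath>

// Combine a dense-FFN output with a MoE output the way the graph code does:
// (ffn_out + moe_out) * (sqrt(2)/2) == (ffn_out + moe_out) / sqrt(2), which
// roughly preserves the variance of a single branch when the two branches are
// independent and similarly scaled (interpretation, not stated in the patch).
float combine_ffn_branches(float ffn_out, float moe_out) {
    return (ffn_out + moe_out) * (std::sqrt(2.0f) / 2.0f);
}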
@@ -7166,10 +7200,14 @@ struct llm_build_grok : public llm_graph_context {
         // lm_head
         cur = build_lora_mm(model.output, cur);

-        // Grok
-        // multiply logits by output_multiplier_scale of 0.5773502691896257
+        cur = ggml_scale(ctx0, cur, hparams.f_logit_scale);

-        cur = ggml_scale(ctx0, cur, 0.5773502691896257f);
+        // final logit soft-capping
+        if (hparams.f_final_logit_softcapping) {
+            cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping);
+            cur = ggml_tanh(ctx0, cur);
+            cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping);
+        }

         cb(cur, "result_output", -1);
         res->t_logits = cur;
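The logit path now reads its scale from hparams and adds the usual cap * tanh(x / cap) soft-capping, expressed above as scale, tanh, scale so it stays within ggml ops; with the grok-1 default of f_final_logit_softcapping == 0.0f the block is skipped and the logits pass through untouched. A scalar reference of the transform:

#include <cmath>

// Scalar reference for the final soft-capping: values are squashed smoothly
// into (-cap, cap); a cap of 0.0f disables the transform entirely.
float soft_cap(float x, float cap) {
    return cap != 0.0f ? cap * std::tanh(x / cap) : x;
}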