|
@@ -2177,6 +2177,15 @@ void llama_model::load_hparams(llama_model_loader & ml) {
|
|
|
default: type = LLM_TYPE_UNKNOWN;
|
|
default: type = LLM_TYPE_UNKNOWN;
|
|
|
}
|
|
}
|
|
|
} break;
|
|
} break;
|
|
|
|
|
+ case LLM_ARCH_PANGU_EMBED:
|
|
|
|
|
+ {
|
|
|
|
|
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
|
|
|
|
|
+ switch (hparams.n_layer) {
|
|
|
|
|
+ case 26: type = LLM_TYPE_1B; break; // openPangu-Embedded-1B-V1.1
|
|
|
|
|
+ case 34: type = LLM_TYPE_7B; break; // openPangu-Embedded-7B-V1.1
|
|
|
|
|
+ default: type = LLM_TYPE_UNKNOWN;
|
|
|
|
|
+ }
|
|
|
|
|
+ } break;
|
|
|
default: throw std::runtime_error("unsupported model architecture");
|
|
default: throw std::runtime_error("unsupported model architecture");
|
|
|
}
|
|
}
|
|
|
|
|
|
|
@@ -6263,6 +6272,50 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
|
|
|
layer.visexp_ffn_up = create_tensor(tn(LLM_TENSOR_VISEXP_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
|
|
layer.visexp_ffn_up = create_tensor(tn(LLM_TENSOR_VISEXP_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
|
|
|
}
|
|
}
|
|
|
} break;
|
|
} break;
|
|
|
|
|
+ case LLM_ARCH_PANGU_EMBED:
|
|
|
|
|
+ {
|
|
|
|
|
+ tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
|
|
|
|
|
+
|
|
|
|
|
+ // output
|
|
|
|
|
+ output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
|
|
|
|
|
+ output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
|
|
|
|
|
+
|
|
|
|
|
+ // if output is NULL, init from the input tok embed
|
|
|
|
|
+ if (output == NULL) {
|
|
|
|
|
+ output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ for (int i = 0; i < n_layer; ++i) {
|
|
|
|
|
+ auto & layer = layers[i];
|
|
|
|
|
+
|
|
|
|
|
+ layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
|
|
|
|
|
+
|
|
|
|
|
+ // weight tensors
|
|
|
|
|
+ layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
|
|
|
|
|
+ layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
|
|
|
|
|
+ layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
|
|
|
|
|
+ layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
|
|
|
|
|
+
|
|
|
|
|
+ // bias tensors
|
|
|
|
|
+ layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd_head_k * n_head}, 0);
|
|
|
|
|
+ layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
|
|
|
|
|
+ layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
|
|
|
|
|
+ layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
|
|
|
|
|
+
|
|
|
|
|
+ layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
|
|
|
|
|
+
|
|
|
|
|
+ if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
|
|
|
|
|
+ layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
|
|
|
|
|
+ layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
|
|
|
|
|
+ } else {
|
|
|
|
|
+ layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
|
|
|
|
|
+ layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
|
|
|
|
|
+ layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
|
|
|
|
|
+ }
|
|
|
|
|
+ } break;
|
|
|
default:
|
|
default:
|
|
|
throw std::runtime_error("unknown architecture");
|
|
throw std::runtime_error("unknown architecture");
|
|
|
}
|
|
}
|
|
@@ -7260,6 +7313,10 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
|
|
|
{
|
|
{
|
|
|
llm = std::make_unique<llm_build_cogvlm>(*this, params);
|
|
llm = std::make_unique<llm_build_cogvlm>(*this, params);
|
|
|
} break;
|
|
} break;
|
|
|
|
|
+ case LLM_ARCH_PANGU_EMBED:
|
|
|
|
|
+ {
|
|
|
|
|
+ llm = std::make_unique<llm_build_pangu_embedded>(*this, params);
|
|
|
|
|
+            } break;
|
|
|
default:
|
|
default:
|
|
|
GGML_ABORT("fatal error");
|
|
GGML_ABORT("fatal error");
|
|
|
}
|
|
}
|
|
@@ -7479,6 +7536,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) {
|
|
|
case LLM_ARCH_APERTUS:
|
|
case LLM_ARCH_APERTUS:
|
|
|
case LLM_ARCH_MINIMAX_M2:
|
|
case LLM_ARCH_MINIMAX_M2:
|
|
|
case LLM_ARCH_COGVLM:
|
|
case LLM_ARCH_COGVLM:
|
|
|
|
|
+ case LLM_ARCH_PANGU_EMBED:
|
|
|
return LLAMA_ROPE_TYPE_NEOX;
|
|
return LLAMA_ROPE_TYPE_NEOX;
|
|
|
|
|
|
|
|
case LLM_ARCH_QWEN2VL:
|
|
case LLM_ARCH_QWEN2VL:
|