@@ -5124,12 +5124,10 @@ static bool llm_load_tensors(
                     // output
                     {
                         model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
-                        if (model.arch != LLM_ARCH_MINICPM){
-                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                            // if output is NULL, init from the input tok embed
-                            if (model.output == NULL) {
-                                model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                            }
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        // if output is NULL, init from the input tok embed
+                        if (model.output == NULL) {
+                            model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
+                        }
                         }
                     }
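The first hunk drops the LLM_ARCH_MINICPM special case in llm_load_tensors, so MiniCPM now takes the common path: output.weight is loaded as optional (with TENSOR_NOT_REQUIRED a missing tensor yields NULL instead of an error), and a NULL output head falls back to a duplicate of the token embedding (TENSOR_DUPLICATED), i.e. tied embeddings. Below is a minimal standalone sketch of that optional-load-with-fallback pattern, assuming a toy loader rather than llama.cpp's real one (toy_loader, get_optional and the tensor names are illustrative only):

// Minimal standalone sketch of the optional-load-with-fallback pattern above.
// Toy code, not llama.cpp: toy_loader and get_optional are hypothetical.
#include <cstdio>
#include <map>
#include <string>

struct tensor { int rows, cols; };

struct toy_loader {
    std::map<std::string, tensor> weights;

    // Return nullptr for a missing tensor instead of aborting, mirroring the
    // llama_model_loader::TENSOR_NOT_REQUIRED behaviour in the hunk.
    tensor * get_optional(const std::string & name) {
        auto it = weights.find(name);
        return it == weights.end() ? nullptr : &it->second;
    }
};

int main() {
    toy_loader ml;
    ml.weights["token_embd.weight"] = {32000, 4096};
    // No "output.weight" entry: this toy model ties the lm_head to the embeddings.

    tensor * output = ml.get_optional("output.weight");
    if (output == nullptr) {
        // Fall back to the input token embedding, as TENSOR_DUPLICATED does above.
        output = ml.get_optional("token_embd.weight");
    }
    std::printf("lm_head shape: %d x %d\n", output->rows, output->cols);
    return 0;
}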
@@ -10212,7 +10210,7 @@ struct llm_build_context {
         cb(cur, "lmhead_scaling", -1);

         // lm_head
-        cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
+        cur = ggml_mul_mat(ctx0, model.output, cur);
         cb(cur, "result_output", -1);

         ggml_build_forward_expand(gf, cur);
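The second hunk points MiniCPM's lm_head at model.output instead of reading model.tok_embd directly. With the loader change above, model.output is either a real output.weight (when the GGUF provides one, previously ignored for MiniCPM) or the duplicated token embedding, so tied-weight models behave exactly as before. As a reminder of what the ggml_mul_mat call computes at this point, a plain-C++ toy sketch (not ggml; lm_head and the sizes are hypothetical): logits[v] is the dot product of output row v with the already-scaled hidden state.

// Plain-C++ toy (not ggml) of what the ggml_mul_mat call above computes:
// logits[v] = dot(w_out[v], h). lm_head and the sizes are made up.
#include <cstdio>
#include <vector>

static std::vector<float> lm_head(const std::vector<std::vector<float>> & w_out,
                                  const std::vector<float> & h) {
    std::vector<float> logits(w_out.size(), 0.0f);
    for (size_t v = 0; v < w_out.size(); ++v) {       // one row per vocab entry
        for (size_t i = 0; i < h.size(); ++i) {
            logits[v] += w_out[v][i] * h[i];
        }
    }
    return logits;
}

int main() {
    // 2-token vocab, n_embd = 2; h stands in for the hidden state after the
    // "lmhead_scaling" step in the graph above.
    std::vector<std::vector<float>> w_out = {{0.5f, -1.0f}, {2.0f, 0.25f}};
    std::vector<float> h = {1.0f, 2.0f};

    std::vector<float> logits = lm_head(w_out, h);
    std::printf("logits: %.2f %.2f\n", logits[0], logits[1]); // -1.50 2.50
    return 0;
}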