
llama : assume tied weights if lm_head/output weights is missing (#5824)

This is to support model configurations with "tie_word_embeddings" set to true.
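For context (not part of the commit itself): with "tie_word_embeddings", a checkpoint stores no separate lm_head/output matrix, because the output projection reuses the token-embedding matrix. Below is a minimal standalone sketch with toy values, independent of llama.cpp's actual tensor types, showing the same [n_vocab x n_embd] matrix used for both embedding lookup and the final logits:

// Sketch only: tied weights mean logits[v] = dot(E[v], h), where E is
// the token-embedding matrix and h is the final hidden state.
#include <cstdio>
#include <vector>

int main() {
    const int n_vocab = 4, n_embd = 3;
    // E: token-embedding matrix, one row per vocab entry (toy values)
    std::vector<std::vector<float>> E = {
        {0.1f, 0.2f, 0.3f}, {0.4f, 0.5f, 0.6f},
        {0.7f, 0.8f, 0.9f}, {1.0f, 1.1f, 1.2f},
    };
    // "Embed" token 2: the hidden state starts as its embedding row
    std::vector<float> h = E[2];
    // Output projection with tied weights: reuse E instead of a
    // separate output.weight tensor
    for (int v = 0; v < n_vocab; ++v) {
        float logit = 0.0f;
        for (int i = 0; i < n_embd; ++i) logit += E[v][i] * h[i];
        std::printf("logit[%d] = %.2f\n", v, logit);
    }
    return 0;
}

The diff below achieves the same effect in the loader by pointing model.output at the TOKEN_EMBD tensor whenever the OUTPUT tensor is absent from the file.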

Co-authored-by: Don Mahurin <2797413+dmahurin@users.noreply.github.com>
Don Mahurin · 1 year ago
Commit e457fb3540
1 changed file with 7 additions and 1 deletion

llama.cpp (+7, -1)

@@ -3888,7 +3888,13 @@ static bool llm_load_tensors(
                     {
                         model.output_norm = ml.create_tensor(ctx_output,       tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                         if (model.arch != LLM_ARCH_MINICPM){
-                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab});
+                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+                            // if output is NULL, init from the input tok embed
+                            if (model.output == NULL) {
+                                model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                                ml.n_created--; // artificial tensor
+                                ml.size_data += ggml_nbytes(model.output);
+                            }
                         }
                     }
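A note on the bookkeeping in the fallback branch (a reading of the surrounding loader internals, not spelled out in the commit): ml.create_tensor increments an internal n_created counter that the loader later checks against the number of tensors it expects to materialize from the GGUF file, so the second creation against the TOKEN_EMBD name is compensated with n_created--. Similarly, size_data appears to track the total bytes the loader plans to read, used for progress accounting; since the shared embedding data now also backs model.output, its byte count is added once more via ggml_nbytes(model.output).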