
llama : add support for Qwen3 MoE tied word embeddings (#13768)

Piotr Jasiukajtis · 8 months ago
commit 4032ca4066
1 changed file with 5 additions and 1 deletion:
  1. src/llama-model.cpp (+5, -1)

src/llama-model.cpp (+5, -1)

@@ -2489,7 +2489,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {

                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }

                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
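
For context, "tied word embeddings" means the checkpoint ships no separate output (lm_head) projection: the token embedding matrix is reused to produce the logits. The change above therefore loads LLM_TENSOR_OUTPUT as optional (TENSOR_NOT_REQUIRED) and, when it is absent from the GGUF, falls back to LLM_TENSOR_TOKEN_EMBD marked as TENSOR_DUPLICATED, so Qwen3 MoE models exported without output.weight now load instead of failing. The following is a minimal standalone sketch of the underlying idea only, with hypothetical names rather than llama.cpp API: the logits are the hidden state multiplied against the shared embedding matrix.

// Minimal sketch, not llama.cpp code: with tied embeddings the same
// [n_embd, n_vocab] matrix serves as both the input embedding table and
// the output projection, so logits[v] = dot(hidden, tok_embd[v]).
#include <cstddef>
#include <vector>

std::vector<float> tied_logits(const std::vector<float> & hidden,   // size n_embd
                               const std::vector<float> & tok_embd, // size n_vocab * n_embd, one row per token
                               std::size_t n_embd, std::size_t n_vocab) {
    std::vector<float> logits(n_vocab, 0.0f);
    for (std::size_t v = 0; v < n_vocab; ++v) {
        for (std::size_t e = 0; e < n_embd; ++e) {
            logits[v] += hidden[e] * tok_embd[v * n_embd + e];
        }
    }
    return logits;
}

In the actual loader the fallback stays at the tensor level: the duplicated token-embedding tensor is simply used wherever the graph would otherwise reference output.weight.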