@@ -3264,7 +3264,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
     // output
     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+    // if output is NULL, init from the input tok embed
+    if (output == NULL) {
+        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+    }
 
     for (int i = 0; i < n_layer; ++i) {
         auto & layer = layers[i];
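
The hunk makes the dedicated output projection optional: output.weight is loaded with TENSOR_NOT_REQUIRED, and when the GGUF does not ship one, the token-embedding matrix is reused as the LM head (tied embeddings). Below is a minimal, self-contained sketch of that fallback pattern; toy_tensor, toy_file and find_optional are invented stand-ins for illustration and are not part of the llama.cpp / ggml API.

```cpp
// Standalone sketch of the "tied embeddings" fallback shown in the patch above.
// toy_tensor / toy_file / find_optional are illustrative stand-ins, not ggml API.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct toy_tensor {
    std::string        name;
    std::vector<float> data;
};

// A toy "model file": tensor name -> weights. A model with tied embeddings
// simply omits "output.weight".
using toy_file = std::map<std::string, toy_tensor>;

// Plays the role of TENSOR_NOT_REQUIRED: a missing tensor is not an error,
// the lookup just returns nullptr.
const toy_tensor * find_optional(const toy_file & f, const std::string & name) {
    auto it = f.find(name);
    return it != f.end() ? &it->second : nullptr;
}

int main() {
    toy_file f;
    f["token_embd.weight"] = toy_tensor{"token_embd.weight", std::vector<float>(16, 0.1f)};
    // no "output.weight" entry -> input and output embeddings are tied

    const toy_tensor * tok_embd = find_optional(f, "token_embd.weight");
    const toy_tensor * output   = find_optional(f, "output.weight");

    // The fallback mirrored from the patch: when the output projection is
    // missing, reuse the token-embedding matrix as the LM head.
    if (output == nullptr) {
        output = tok_embd;
    }

    std::cout << "LM head uses tensor: " << output->name
              << " (" << output->data.size() << " values)\n";
    return 0;
}
```

Note that the sketch only captures the control flow: the real loader routes the fallback through create_tensor with the TENSOR_DUPLICATED flag rather than aliasing a pointer, so the output head is registered and placed like any other weight.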