@@ -1542,6 +1542,9 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                     hparams.dec_start_token_id = dec_start_token_id;
                 }

+                hparams.dec_n_layer = hparams.n_layer;
+                ml.get_key(LLM_KV_DECODER_BLOCK_COUNT, hparams.dec_n_layer, false);
+
                 switch (hparams.n_layer) {
                     case 6: type = LLM_TYPE_60M; break; // t5-small
                     case 8: type = LLM_TYPE_80M; break; // flan-t5-small
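Note (not part of the patch): the hparams hunk above amounts to "use the encoder depth unless the GGUF metadata carries an explicit decoder block count". A minimal standalone sketch of that fallback, with a hypothetical read_optional_u32() standing in for ml.get_key(..., /*required=*/false):

#include <cstdint>
#include <cstdio>
#include <optional>

// Stand-in for a non-required GGUF metadata lookup (hypothetical helper, not llama.cpp API).
static std::optional<uint32_t> read_optional_u32(bool key_present, uint32_t stored) {
    if (key_present) {
        return stored;
    }
    return std::nullopt;
}

int main() {
    const uint32_t n_layer = 12;      // encoder blocks (always known)
    uint32_t dec_n_layer   = n_layer; // default: symmetric encoder/decoder

    // e.g. an asymmetric checkpoint that stores 24 decoder blocks
    if (auto v = read_optional_u32(/*key_present=*/true, /*stored=*/24)) {
        dec_n_layer = *v;
    }

    std::printf("encoder layers: %u, decoder layers: %u\n",
                (unsigned) n_layer, (unsigned) dec_n_layer);
    return 0;
}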
@@ -4414,6 +4417,14 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                 }

+                // n_layer: number of encoder_layers
+                // dec_n_layer: number of decoder_layers
+                const int dec_n_layer = hparams.dec_n_layer;
+                if (dec_n_layer > n_layer) {
+                    layers.resize(dec_n_layer);
+                }
+
+                // load encoder layers
                 for (int i = 0; i < n_layer; ++i) {
                     auto & layer = layers[i];

@@ -4429,6 +4440,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
                     layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
                     layer.ffn_up_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                }
+
+                // load decoder layers
+                for (int i = 0; i < dec_n_layer; ++i) {
+                    auto & layer = layers[i];

                     layer.attn_norm = create_tensor(tn(LLM_TENSOR_DEC_ATTN_NORM, "weight", i), {n_embd}, 0);
                     layer.attn_rel_b = create_tensor(tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
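Note (not part of the patch): encoder and decoder weights still share the single layers vector; slot i carries both the i-th encoder layer's *_enc tensors and the i-th decoder layer's tensors, so the vector only has to grow when the decoder is deeper. A toy sketch of that layout, using an illustrative toy_layer struct rather than the real llama_layer:

#include <cstdio>
#include <vector>

struct toy_layer {
    bool has_enc = false; // stands in for the attn_*_enc / ffn_*_enc tensors
    bool has_dec = false; // stands in for the decoder-side tensors
};

int main() {
    const int n_layer     = 6; // e.g. a t5-small-sized encoder
    const int dec_n_layer = 8; // hypothetical deeper decoder

    std::vector<toy_layer> layers(n_layer);
    if (dec_n_layer > n_layer) {
        layers.resize(dec_n_layer); // grow only if the decoder is deeper
    }

    for (int i = 0; i < n_layer; ++i)     layers[i].has_enc = true; // "load encoder layers"
    for (int i = 0; i < dec_n_layer; ++i) layers[i].has_dec = true; // "load decoder layers"

    for (size_t i = 0; i < layers.size(); ++i) {
        std::printf("layer %zu: enc=%d dec=%d\n", i, (int) layers[i].has_enc, (int) layers[i].has_dec);
    }
    return 0;
}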
@@ -13509,7 +13525,9 @@ struct llm_build_t5_dec : public llm_graph_context {

         ggml_tensor * inp_out_ids = build_inp_out_ids();

-        for (int il = 0; il < n_layer; ++il) {
+        const int64_t dec_n_layer = hparams.dec_n_layer;
+
+        for (int il = 0; il < dec_n_layer; ++il) {
             ggml_tensor * inpSA = inpL;

             // norm
@@ -13600,7 +13618,7 @@ struct llm_build_t5_dec : public llm_graph_context {
                 //cb(cur, "kqv_out", il);
             }

-            if (il == n_layer - 1 && inp_out_ids) {
+            if (il == dec_n_layer - 1 && inp_out_ids) {
                 cur = ggml_get_rows(ctx0, cur, inp_out_ids);
                 inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
             }
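Note (not part of the patch): the two decoder-graph hunks above have to change together. The layer loop now runs to dec_n_layer, so the "gather only the requested output rows at the last layer" check must compare against the same bound; with an asymmetric model, keeping n_layer in the comparison would fire the gather at the wrong layer, or never. A small sketch of that failure mode, with illustrative names (last_gather_layer is not llama.cpp code):

#include <cstdio>

// Returns the iteration at which the "gather output rows" step fires, -1 if never.
// The gather stands in for the ggml_get_rows(..., inp_out_ids) calls above.
static int last_gather_layer(int loop_bound, int check_bound) {
    int fired_at = -1;
    for (int il = 0; il < loop_bound; ++il) {
        if (il == check_bound - 1) {
            fired_at = il;
        }
    }
    return fired_at;
}

int main() {
    const int n_layer = 6, dec_n_layer = 8; // hypothetical asymmetric model
    std::printf("check against n_layer:     gather fires at il=%d (too early)\n",
                last_gather_layer(dec_n_layer, n_layer));
    std::printf("check against dec_n_layer: gather fires at il=%d (last layer)\n",
                last_gather_layer(dec_n_layer, dec_n_layer));
    return 0;
}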
@@ -13621,8 +13639,8 @@ struct llm_build_t5_dec : public llm_graph_context {
                         model.layers[il].ffn_gate, NULL, NULL,
                         model.layers[il].ffn_down, NULL, NULL,
                         NULL,
-                        model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
-                        model.layers[il].ffn_gate_enc ? LLM_FFN_PAR : LLM_FFN_SEQ,
+                        model.layers[il].ffn_gate ? LLM_FFN_GELU : LLM_FFN_RELU,
+                        model.layers[il].ffn_gate ? LLM_FFN_PAR : LLM_FFN_SEQ,
                         il);
                 cb(cur, "ffn_out", il);
             }
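Note (not part of the patch): the final hunk is a correctness fix rather than a rename. The decoder FFN should derive its activation and gating mode from the decoder's own optional gate tensor (ffn_gate), not the encoder's (ffn_gate_enc); in particular, decoder layers beyond the encoder depth have no ffn_gate_enc at all, so they would silently fall back to the ungated ReLU path. A sketch of the selection rule with illustrative names (toy enums, not the llama.cpp LLM_FFN_* constants):

#include <cstdio>

enum toy_ffn_act  { TOY_FFN_RELU, TOY_FFN_GELU };
enum toy_ffn_mode { TOY_FFN_SEQ,  TOY_FFN_PAR  };

struct toy_ffn_cfg { toy_ffn_act act; toy_ffn_mode mode; };

// Choose the FFN variant from the presence of *this block's* gate tensor.
static toy_ffn_cfg pick_ffn(const void * gate_tensor) {
    return gate_tensor
        ? toy_ffn_cfg{TOY_FFN_GELU, TOY_FFN_PAR}  // gated (flan-T5-style) FFN
        : toy_ffn_cfg{TOY_FFN_RELU, TOY_FFN_SEQ}; // plain (T5-style) FFN
}

int main() {
    int dummy_gate = 0;
    toy_ffn_cfg gated = pick_ffn(&dummy_gate);
    toy_ffn_cfg plain = pick_ffn(nullptr);
    std::printf("gated: act=%d mode=%d\n", (int) gated.act, (int) gated.mode);
    std::printf("plain: act=%d mode=%d\n", (int) plain.act, (int) plain.mode);
    return 0;
}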