@@ -205,6 +205,7 @@ enum llm_arch {
     LLM_ARCH_REFACT,
     LLM_ARCH_BERT,
     LLM_ARCH_NOMIC_BERT,
+    LLM_ARCH_JINA_BERT_V2,
     LLM_ARCH_BLOOM,
     LLM_ARCH_STABLELM,
     LLM_ARCH_QWEN,
@@ -228,39 +229,40 @@ enum llm_arch {
 };
 
 static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
-    { LLM_ARCH_LLAMA,           "llama"      },
-    { LLM_ARCH_FALCON,          "falcon"     },
-    { LLM_ARCH_GROK,            "grok"       },
-    { LLM_ARCH_GPT2,            "gpt2"       },
-    { LLM_ARCH_GPTJ,            "gptj"       },
-    { LLM_ARCH_GPTNEOX,         "gptneox"    },
-    { LLM_ARCH_MPT,             "mpt"        },
-    { LLM_ARCH_BAICHUAN,        "baichuan"   },
-    { LLM_ARCH_STARCODER,       "starcoder"  },
-    { LLM_ARCH_PERSIMMON,       "persimmon"  },
-    { LLM_ARCH_REFACT,          "refact"     },
-    { LLM_ARCH_BERT,            "bert"       },
-    { LLM_ARCH_NOMIC_BERT,      "nomic-bert" },
-    { LLM_ARCH_BLOOM,           "bloom"      },
-    { LLM_ARCH_STABLELM,        "stablelm"   },
-    { LLM_ARCH_QWEN,            "qwen"       },
-    { LLM_ARCH_QWEN2,           "qwen2"      },
-    { LLM_ARCH_QWEN2MOE,        "qwen2moe"   },
-    { LLM_ARCH_PHI2,            "phi2"       },
-    { LLM_ARCH_PHI3,            "phi3"       },
-    { LLM_ARCH_PLAMO,           "plamo"      },
-    { LLM_ARCH_CODESHELL,       "codeshell"  },
-    { LLM_ARCH_ORION,           "orion"      },
-    { LLM_ARCH_INTERNLM2,       "internlm2"  },
-    { LLM_ARCH_MINICPM,         "minicpm"    },
-    { LLM_ARCH_GEMMA,           "gemma"      },
-    { LLM_ARCH_STARCODER2,      "starcoder2" },
-    { LLM_ARCH_MAMBA,           "mamba"      },
-    { LLM_ARCH_XVERSE,          "xverse"     },
-    { LLM_ARCH_COMMAND_R,       "command-r"  },
-    { LLM_ARCH_DBRX,            "dbrx"       },
-    { LLM_ARCH_OLMO,            "olmo"       },
-    { LLM_ARCH_UNKNOWN,         "(unknown)"  },
+    { LLM_ARCH_LLAMA,           "llama"        },
+    { LLM_ARCH_FALCON,          "falcon"       },
+    { LLM_ARCH_GROK,            "grok"         },
+    { LLM_ARCH_GPT2,            "gpt2"         },
+    { LLM_ARCH_GPTJ,            "gptj"         },
+    { LLM_ARCH_GPTNEOX,         "gptneox"      },
+    { LLM_ARCH_MPT,             "mpt"          },
+    { LLM_ARCH_BAICHUAN,        "baichuan"     },
+    { LLM_ARCH_STARCODER,       "starcoder"    },
+    { LLM_ARCH_PERSIMMON,       "persimmon"    },
+    { LLM_ARCH_REFACT,          "refact"       },
+    { LLM_ARCH_BERT,            "bert"         },
+    { LLM_ARCH_NOMIC_BERT,      "nomic-bert"   },
+    { LLM_ARCH_JINA_BERT_V2,    "jina-bert-v2" },
+    { LLM_ARCH_BLOOM,           "bloom"        },
+    { LLM_ARCH_STABLELM,        "stablelm"     },
+    { LLM_ARCH_QWEN,            "qwen"         },
+    { LLM_ARCH_QWEN2,           "qwen2"        },
+    { LLM_ARCH_QWEN2MOE,        "qwen2moe"     },
+    { LLM_ARCH_PHI2,            "phi2"         },
+    { LLM_ARCH_PHI3,            "phi3"         },
+    { LLM_ARCH_PLAMO,           "plamo"        },
+    { LLM_ARCH_CODESHELL,       "codeshell"    },
+    { LLM_ARCH_ORION,           "orion"        },
+    { LLM_ARCH_INTERNLM2,       "internlm2"    },
+    { LLM_ARCH_MINICPM,         "minicpm"      },
+    { LLM_ARCH_GEMMA,           "gemma"        },
+    { LLM_ARCH_STARCODER2,      "starcoder2"   },
+    { LLM_ARCH_MAMBA,           "mamba"        },
+    { LLM_ARCH_XVERSE,          "xverse"       },
+    { LLM_ARCH_COMMAND_R,       "command-r"    },
+    { LLM_ARCH_DBRX,            "dbrx"         },
+    { LLM_ARCH_OLMO,            "olmo"         },
+    { LLM_ARCH_UNKNOWN,         "(unknown)"    },
 };
 
 enum llm_kv {
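Note: the strings in this map are what GGUF stores under `general.architecture`, so the new `"jina-bert-v2"` entry is what lets the loader resolve converted Jina checkpoints to `LLM_ARCH_JINA_BERT_V2`. A minimal sketch of the reverse lookup (llama.cpp has a helper to this effect; this body is illustrative, not the verbatim implementation):

    static llm_arch llm_arch_from_string(const std::string & name) {
        for (const auto & kv : LLM_ARCH_NAMES) { // scan the name table above
            if (kv.second == name) {
                return kv.first;
            }
        }
        return LLM_ARCH_UNKNOWN; // falls back to the "(unknown)" entry
    }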
@@ -691,6 +693,25 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
             { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
         },
     },
+    {
+        LLM_ARCH_JINA_BERT_V2,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
+            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_BLOOM,
         {
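Note: each `"blk.%d.*"` pattern is expanded by the `tn(...)` helper with the layer index and a `"weight"`/`"bias"` suffix before the tensor is looked up in the GGUF. Roughly (a formatting sketch, not the helper's actual implementation):

    // tn(LLM_TENSOR_ATTN_Q, "weight", 3) -> "blk.3.attn_q.weight"
    char buf[64];
    snprintf(buf, sizeof(buf), "blk.%d.attn_q", /*layer=*/3); // expand %d
    const std::string name = std::string(buf) + ".weight";    // append suffix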
@@ -3778,6 +3799,12 @@ static void llm_load_hparams(
 
     // get hparams kv
     ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
+
+    // everything past this point is not vocab-related
+    if (hparams.vocab_only) {
+        return;
+    }
+
     ml.get_key(LLM_KV_CONTEXT_LENGTH,      hparams.n_ctx_train);
     ml.get_key(LLM_KV_EMBEDDING_LENGTH,    hparams.n_embd);
     ml.get_key(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff);
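Note: with this early return, a `vocab_only` load succeeds even when the non-vocab hparams keys are absent, which is what tokenizer-only tooling needs. Usage sketch against the existing public API (the file name is a placeholder):

    llama_model_params mparams = llama_model_default_params();
    mparams.vocab_only = true; // read vocab/tokenizer metadata, skip tensors and non-vocab hparams
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);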
@@ -3961,6 +3988,19 @@ static void llm_load_hparams(
                         model.type = e_model::MODEL_335M; break; // bge-large
                 }
             } break;
+        case LLM_ARCH_JINA_BERT_V2:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
+                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
+                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
+                hparams.f_max_alibi_bias = 8.0f;
+
+                switch (hparams.n_layer) {
+                    case 4:  model.type = e_model::MODEL_33M;  break; // jina-embeddings-small
+                    case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base
+                }
+            } break;
         case LLM_ARCH_NOMIC_BERT:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
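Note: the hard-coded `f_max_alibi_bias = 8.0f` reflects that jina-bert-v2 encodes position through ALiBi attention biases rather than RoPE. For intuition, the standard ALiBi recipe turns a maximum bias into geometrically decaying per-head slopes, roughly as follows (a sketch that assumes `n_head` is a power of two; ggml's softmax handles the general case):

    #include <math.h>

    // Per-head ALiBi slope given max_bias = 8.0f.
    static float alibi_slope(int h, int n_head, float max_bias) {
        const float m0 = powf(2.0f, -max_bias / (float) n_head);
        return powf(m0, (float) (h + 1)); // head 0 steepest, later heads flatter
    }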
@@ -4382,7 +4422,9 @@ static void llm_load_vocab(
                 tokenizer_pre == "starcoder") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
             } else if (
-                tokenizer_pre == "gpt-2") {
+                tokenizer_pre == "gpt-2" ||
+                tokenizer_pre == "jina-es" ||
+                tokenizer_pre == "jina-de") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
             } else if (
                 tokenizer_pre == "refact") {
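Note: `tokenizer_pre` is read from the GGUF key `tokenizer.ggml.pre`, which the conversion script writes per model; the bilingual Jina checkpoints declare `jina-es` / `jina-de` and are routed onto the existing GPT-2 BPE pre-tokenizer rather than getting a new regex. Loader-side sketch (mirrors the `ml.get_key` pattern used elsewhere in this diff):

    std::string tokenizer_pre = "default";
    ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false); // optional key
    // "jina-es" and "jina-de" then select LLAMA_VOCAB_PRE_TYPE_GPT2 above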
@@ -5241,6 +5283,50 @@ static bool llm_load_tensors(
                         layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd});
                     }
                 } break;
+            case LLM_ARCH_JINA_BERT_V2:
+                {
+                    model.tok_embd   = ml.create_tensor(ctx_input,  tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab});      // word_embeddings
+                    model.type_embd  = ml.create_tensor(ctx_input,  tn(LLM_TENSOR_TOKEN_TYPES,     "weight"), {n_embd, n_vocab_type}); // token_type_embeddings
+                    model.tok_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});               // LayerNorm
+                    model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd});               // LayerNorm bias
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        ggml_context * ctx_layer = ctx_for_layer(i);
+                        ggml_context * ctx_split = ctx_for_layer_split(i);
+
+                        auto & layer = model.layers[i]; // JinaBertLayer
+
+                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
+                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias",   i), {n_embd});
+
+                        layer.attn_q_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, false);
+                        layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), {n_embd}, false);
+
+                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
+                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa});
+
+                        layer.attn_k_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, false);
+                        layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), {n_embd}, false);
+
+                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
+                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa});
+
+                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); // output_dense
+                        layer.bo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd});         // output_dense
+
+                        layer.attn_out_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}); // output_norm
+                        layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias",   i), {n_embd});
+
+                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
+                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
+
+                        layer.ffn_down   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
+                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd});
+
+                        layer.layer_out_norm   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
+                        layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias",   i), {n_embd});
+                    }
+                } break;
             case LLM_ARCH_BLOOM:
                 {
                     model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
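Note: the trailing `false` on the Q/K norm tensors marks them as optional, so `create_tensor` yields a null tensor instead of an error when the checkpoint lacks them, and the graph builder branches on that (see the `attn_q_norm` / `attn_k_norm` guards further down). Illustrative shape of the contract (condensed call; the surrounding names are the ones used above):

    struct ggml_tensor * t = ml.create_tensor(ctx_layer, name, {n_embd}, /*required=*/false);
    if (t == nullptr) {
        // checkpoint does not ship this tensor; graph code must skip it
    }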
@@ -6317,7 +6403,7 @@ static struct ggml_tensor * llm_build_ffn(
         llm_ffn_gate_type type_gate,
         const llm_build_cb & cb,
         int il) {
-    struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur);
+    struct ggml_tensor * tmp = up ? ggml_mul_mat(ctx, up, cur) : cur;
     cb(tmp, "ffn_up", il);
 
     if (up_b) {
@@ -8118,8 +8204,11 @@ struct llm_build_context {
 
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;
+        struct ggml_tensor * inp_pos = nullptr;
 
-        struct ggml_tensor * inp_pos = build_inp_pos();
+        if (model.arch != LLM_ARCH_JINA_BERT_V2) {
+            inp_pos = build_inp_pos();
+        }
         struct ggml_tensor * inp_mean = build_inp_mean();
         struct ggml_tensor * inp_cls  = build_inp_cls();
 
@@ -8150,13 +8239,26 @@ struct llm_build_context {
             struct ggml_tensor * Vcur;
 
             // self-attention
-            if (model.arch == LLM_ARCH_BERT) {
+            if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) {
                 Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), model.layers[il].bq);
                 cb(Qcur, "Qcur", il);
 
+                if (model.layers[il].attn_q_norm) {
+                    Qcur = llm_build_norm(ctx0, Qcur, hparams,
+                            model.layers[il].attn_q_norm,
+                            model.layers[il].attn_q_norm_b,
+                            LLM_NORM, cb, il);
+                }
+
                 Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), model.layers[il].bk);
                 cb(Kcur, "Kcur", il);
 
+                if (model.layers[il].attn_k_norm) {
+                    Kcur = llm_build_norm(ctx0, Kcur, hparams,
+                            model.layers[il].attn_k_norm,
+                            model.layers[il].attn_k_norm_b,
+                            LLM_NORM, cb, il);
+                }
                 Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, cur), model.layers[il].bv);
                 cb(Vcur, "Vcur", il);
 
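Note: the `LLM_NORM` applied to Qcur/Kcur above is a plain LayerNorm with the model's `f_norm_eps`. A self-contained reference of the per-row math (for orientation only; ggml implements this as a fused op):

    #include <math.h>

    static void layer_norm(float * x, int n, const float * gamma, const float * beta, float eps) {
        float mean = 0.0f;
        for (int i = 0; i < n; ++i) mean += x[i];
        mean /= (float) n;
        float var = 0.0f;
        for (int i = 0; i < n; ++i) var += (x[i] - mean) * (x[i] - mean);
        var /= (float) n;
        const float inv = 1.0f / sqrtf(var + eps);
        for (int i = 0; i < n; ++i) x[i] = (x[i] - mean) * inv * gamma[i] + beta[i];
    }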
@@ -8247,6 +8349,13 @@ struct llm_build_context {
                         model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                         NULL,
                         LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+            } else if (model.arch == LLM_ARCH_JINA_BERT_V2) {
+                cur = llm_build_ffn(ctx0, cur,
+                        model.layers[il].ffn_up,   NULL,
+                        model.layers[il].ffn_gate, NULL,
+                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+                        NULL,
+                        LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
             } else {
                 cur = llm_build_ffn(ctx0, cur,
                         model.layers[il].ffn_up, NULL,
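Note: unlike the BERT branch above (`LLM_FFN_SEQ`, i.e. down(gelu(up(x)))), the jina-bert-v2 branch selects `LLM_FFN_PAR`, a gated GELU (GEGLU) FFN where the activated gate path is multiplied elementwise with the parallel up path before the down projection. This is also why `llm_build_ffn` gained the `up ? ... : cur` guard earlier in this diff: `up` may now be absent entirely. Elementwise sketch of one hidden unit (exact-erf GELU; ggml uses its own approximation):

    #include <math.h>

    // geglu(gate_x, up_x) = gelu(gate_x) * up_x
    static float geglu_unit(float gate_x, float up_x) {
        const float g = 0.5f * gate_x * (1.0f + erff(gate_x / sqrtf(2.0f)));
        return g * up_x;
    }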
@@ -10769,6 +10878,7 @@ static struct ggml_cgraph * llama_build_graph(
                 result = llm.build_refact();
             } break;
         case LLM_ARCH_BERT:
+        case LLM_ARCH_JINA_BERT_V2:
         case LLM_ARCH_NOMIC_BERT:
             {
                 result = llm.build_bert();
@@ -12695,7 +12805,10 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                         }
                     }
 
-                    GGML_ASSERT(vocab.special_add_eos != 1);
+                    if (add_special && vocab.special_add_eos == 1) {
+                        GGML_ASSERT(vocab.special_eos_id != -1);
+                        output.push_back(vocab.special_eos_id);
+                    }
                 } break;
             case LLAMA_VOCAB_TYPE_WPM:
                 {
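Note: the old code asserted that EOS auto-append was never requested on this path; now, when the caller passes `add_special` and the vocab metadata asks for it (`special_add_eos == 1`), EOS is appended, which [SEP]/EOS-terminated embedding models like jina-bert-v2 depend on. Caller-side sketch using the existing API (`model` and `text` assumed in scope):

    std::vector<llama_token> tokens(512);
    const int n = llama_tokenize(model, text.c_str(), (int) text.size(),
                                 tokens.data(), (int) tokens.size(),
                                 /*add_special=*/true, /*parse_special=*/false);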
@@ -15746,6 +15859,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_REFACT:
         case LLM_ARCH_BLOOM:
         case LLM_ARCH_MAMBA:
+        case LLM_ARCH_JINA_BERT_V2:
             return LLAMA_ROPE_TYPE_NONE;
 
         // use what we call a normal RoPE, operating on pairs of consecutive head values