@@ -210,6 +210,7 @@ enum llm_arch {
     LLM_ARCH_T5,
     LLM_ARCH_T5ENCODER,
     LLM_ARCH_JAIS,
+    LLM_ARCH_NEMOTRON,
     LLM_ARCH_UNKNOWN,
 };

@@ -255,6 +256,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_T5,              "t5"           },
     { LLM_ARCH_T5ENCODER,       "t5encoder"    },
     { LLM_ARCH_JAIS,            "jais"         },
+    { LLM_ARCH_NEMOTRON,        "nemotron"     },
     { LLM_ARCH_UNKNOWN,         "(unknown)"    },
 };

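The two hunks above register the new architecture end to end: the enum value is the internal handle, and the "nemotron" string is what the loader matches against the GGUF metadata key general.architecture. A minimal sketch of that reverse lookup, assuming the helper keeps the llm_arch_from_string name used elsewhere in llama.cpp:

    // sketch: map the GGUF "general.architecture" string to the enum
    static llm_arch llm_arch_from_string(const std::string & name) {
        for (const auto & kv : LLM_ARCH_NAMES) {
            if (kv.second == name) {
                return kv.first; // "nemotron" -> LLM_ARCH_NEMOTRON
            }
        }
        return LLM_ARCH_UNKNOWN;
    }
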
@@ -1296,6 +1298,24 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
             { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
         },
     },
+    {
+        LLM_ARCH_NEMOTRON,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_OUTPUT,          "output" },
+            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_UNKNOWN,
         {
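These entries are base names: the tn() helper expands the "%d" placeholder with the layer index and appends a ".weight" or ".bias" suffix, so at load time they become concrete GGUF tensor names, for example:

    // illustrative expansions of the templates above
    tn(LLM_TENSOR_ATTN_Q,   "weight",  0)  // -> "blk.0.attn_q.weight"
    tn(LLM_TENSOR_FFN_DOWN, "bias",   31)  // -> "blk.31.ffn_down.bias"

Note that the map has no LLM_TENSOR_FFN_GATE entry: Nemotron's MLP has no gate projection, which matches the ungated squared-ReLU feed-forward in build_nemotron further down.
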
@@ -5235,6 +5255,14 @@ static void llm_load_hparams(
                     default: model.type = e_model::MODEL_UNKNOWN;
                 }
             } break;
+        case LLM_ARCH_NEMOTRON:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                switch (hparams.n_layer) {
+                    case 32: model.type = e_model::MODEL_4B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
         default: (void)0;
     }

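Nemotron uses standard LayerNorm rather than RMSNorm, so the loader reads f_norm_eps (the "<arch>.attention.layer_norm_epsilon" GGUF key) instead of the f_norm_rms_eps used by LLaMA-style models, and build_nemotron below passes LLM_NORM rather than LLM_NORM_RMS. The difference, as a sketch:

    // LLM_NORM     : y = (x - mean(x)) / sqrt(var(x) + f_norm_eps) * w + b
    // LLM_NORM_RMS : y = x / sqrt(mean(x^2) + f_norm_rms_eps) * w

The 32-layer case is mapped to MODEL_4B (presumably the 4B Nemotron/Minitron checkpoints); any other depth falls through to MODEL_UNKNOWN.
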
@@ -7568,6 +7596,48 @@ static bool llm_load_tensors(
                         layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                     }
                 } break;
+            case LLM_ARCH_NEMOTRON:
+                {
+                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+
+                    // output
+                    {
+                        model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
+                        model.output        = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        ggml_context * ctx_layer = ctx_for_layer(i);
+                        ggml_context * ctx_split = ctx_for_layer_split(i);
+
+                        auto & layer = model.layers[i];
+
+                        layer.attn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
+                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
+
+                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd});
+                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa});
+                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa});
+                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
+
+                        // optional bias tensors
+                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
+
+                        layer.ffn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
+                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
+
+                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
+                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff});
+
+                        // optional MLP bias
+                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.ffn_up_b   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff},   llama_model_loader::TENSOR_NOT_REQUIRED);
+                    }
+                } break;
             default:
                 throw std::runtime_error("unknown architecture");
         }
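Two things to note in the tensor loading. The K/V projections are created with n_embd_gqa columns rather than n_embd, which is what enables grouped-query attention; roughly:

    // projection shapes (sketch), with n_embd = n_head * n_embd_head
    // and n_embd_gqa = n_head_kv * n_embd_head:
    //   wq: {n_embd, n_embd}      -> n_head    query heads
    //   wk: {n_embd, n_embd_gqa}  -> n_head_kv key heads
    //   wv: {n_embd, n_embd_gqa}  -> n_head_kv value heads

And all bias tensors are created with TENSOR_NOT_REQUIRED, so checkpoints exported without attention or MLP biases still load; build_nemotron below guards each bias ggml_add with a null check to match.
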
@@ -8254,7 +8324,7 @@ static struct ggml_tensor * llm_build_kqv(
     struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
     cb(kq, "kq", il);

-    if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
+    if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2 || model.arch == LLM_ARCH_NEMOTRON) {
         // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
         // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
         ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
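Adding LLM_ARCH_NEMOTRON here forces the Q*K^T matmul to accumulate in F32 for this architecture. The likely failure mode with the default F16 accumulation is that raw dot products exceed the representable F16 range before the 1/sqrt(n_embd_head) scale is applied, which surfaces as NaNs after softmax; the referenced PR comment discusses the same symptom for the other listed architectures. The override is per-tensor, so the rest of the graph keeps the default precision.
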
@@ -13755,6 +13825,128 @@ struct llm_build_context {

         return gf;
     }
+
+    struct ggml_cgraph * build_nemotron() {
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        //GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;
+
+            // norm
+            cur = llm_build_norm(ctx0, inpL, hparams,
+                    model.layers[il].attn_norm,
+                    model.layers[il].attn_norm_b,
+                    LLM_NORM, cb, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                // compute Q and K and RoPE them
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                Qcur = ggml_rope_ext(
+                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Qcur, "Qcur", il);
+
+                Kcur = ggml_rope_ext(
+                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Kcur, "Kcur", il);
+
+                cur = llm_build_kv(ctx0, lctx, kv_self, gf,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+
+            }
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
+                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+            }
+
+            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network
+            cur = llm_build_norm(ctx0, ffn_inp, hparams,
+                    model.layers[il].ffn_norm,
+                    model.layers[il].ffn_norm_b,
+                    LLM_NORM, cb, il);
+            cb(cur, "ffn_norm", il);
+
+            cur = llm_build_ffn(ctx0, lctx, cur,
+                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
+                    NULL,                      NULL,                        NULL,
+                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                    NULL,
+                    LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);
+
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "ffn_out", il);
+
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = llm_build_norm(ctx0, cur, hparams,
+                model.output_norm, model.output_norm_b,
+                LLM_NORM, cb, -1);
+        cb(cur, "result_norm", -1);
+
+        // lm_head
+        cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
 };

 static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) {
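Two details of build_nemotron are worth calling out. First, the assert on hparams.n_rot is commented out because ggml_rope_ext is given n_rot explicitly and rotates only that many leading dimensions of each head, so n_rot may be smaller than n_embd_head for checkpoints that use partial rotary embeddings. Second, the feed-forward uses LLM_FFN_RELU_SQR with LLM_FFN_SEQ and a NULL gate tensor, i.e. an ungated squared-ReLU MLP instead of the gated SiLU of LLaMA-style models. In scalar form, the llm_build_ffn call computes approximately:

    // squared-ReLU feed-forward (sketch; the biases are optional)
    //   h = relu(W_up * x + b_up)
    //   y = W_down * (h .* h) + b_down    // elementwise square, no gate branch

The normalization calls all pass LLM_NORM with a weight and a bias tensor, consistent with the LayerNorm epsilon loaded in llm_load_hparams above.
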
@@ -14010,6 +14202,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_jais();
             } break;
+        case LLM_ARCH_NEMOTRON:
+            {
+                result = llm.build_nemotron();
+            } break;
         default:
             GGML_ABORT("fatal error");
     }
@@ -17080,6 +17276,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_OPENELM:
         case LLM_ARCH_GPTNEOX:
         case LLM_ARCH_CODESHELL:
+        case LLM_ARCH_NEMOTRON:
             return LLAMA_ROPE_TYPE_NEOX;

         // all model arches should be listed explicitly here
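LLAMA_ROPE_TYPE_NEOX selects the GPT-NeoX rotation layout, which pairs dimension d with dimension d + n_rot/2 (the two halves of the rotary block), as opposed to the adjacent-pair interleaving of LLAMA_ROPE_TYPE_NORM. For one head, the NEOX rotation is approximately:

    // NEOX-style RoPE (sketch), for d in [0, n_rot/2):
    //   x'[d]           = x[d] * cos(theta_d) - x[d + n_rot/2] * sin(theta_d)
    //   x'[d + n_rot/2] = x[d] * sin(theta_d) + x[d + n_rot/2] * cos(theta_d)

Picking the wrong style still produces a runnable graph but garbles the output, which is why the hunk's closing comment insists that every architecture be listed here explicitly.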