- #include "llm_build_olmo2.h"
- #include <cmath>
- template <bool iswa>
- llm_build_olmo2<iswa>::llm_build_olmo2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- ggml_tensor * cur;
- ggml_tensor * inpL;
- inpL = build_inp_embd(model.tok_embd);
- // inp_pos - contains the positions
- ggml_tensor * inp_pos = build_inp_pos();
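
    // iswa selects, at compile time, the interleaved sliding-window-attention
    // KV input (used by the OLMo 3 graph) versus the standard full-context KV input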
    using inp_attn_type = std::conditional_t<iswa, llm_graph_input_attn_kv_iswa, llm_graph_input_attn_kv>;
    inp_attn_type * inp_attn = nullptr;

    if constexpr (iswa) {
        inp_attn = build_attn_inp_kv_iswa();
    } else {
        inp_attn = build_attn_inp_kv();
    }
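
    // row indices of the tokens whose outputs are actually needed
    // (typically only the last token during autoregressive decoding)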
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        cur = inpL;

        // self-attention
        {
            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
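
            // QK-norm: RMS-normalize the query and key projections before RoPE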
            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_normed", il);

            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(Kcur, "Kcur_normed", il);
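
            // split the flat projections into [n_embd_head, n_head(_kv), n_tokens]
            // so that RoPE and attention operate per head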
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

            const bool is_swa = hparams.is_swa(il);

            if (is_swa) {
                // for the sliding-window layers, OLMo 3 uses regular RoPE with no YaRN rope scaling;
                // this is achieved here by setting freq_scale and attn_factor to 1
                // (ext_factor is also set to 0 to avoid a few unnecessary computations)
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, 1.0,
                        0.0, 1.0, beta_fast, beta_slow
                        );

                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, 1.0,
                        0.0, 1.0, beta_fast, beta_slow
                        );
            } else {
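                // full-attention layers: use the configured YaRN scaling parameters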
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );

                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
            }

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);
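
            // scaled dot-product attention (scale = 1/sqrt(n_embd_head)); the merged
            // head outputs are projected back to n_embd through wo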
            cur = build_attn(inp_attn,
                    model.layers[il].wo, NULL,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
        }
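
        // on the last layer, keep only the rows for which an output was requested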
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
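
        // OLMo 2 uses a post-norm layout: the attention output is RMS-normalized
        // before the residual add, rather than normalizing the block input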
        cur = build_norm(cur,
                model.layers[il].attn_post_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_post_norm", il);

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // feed-forward network
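        // SwiGLU: SILU-gated parallel up/gate projections followed by the down projection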
        cur = build_ffn(ffn_inp,
                model.layers[il].ffn_up,   NULL, NULL,
                model.layers[il].ffn_gate, NULL, NULL,
                model.layers[il].ffn_down, NULL, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, il);
        cb(cur, "ffn_out", il);

        cur = build_norm(cur,
                model.layers[il].ffn_post_norm, NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "ffn_post_norm", -1);

        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "ffn_out", il);
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;
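
    // expand the graph so that every op the logits depend on gets built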
    ggml_build_forward_expand(gf, cur);
}

// Explicit template instantiations
template struct llm_build_olmo2<false>;
template struct llm_build_olmo2<true>;