// whisper-enc.cpp

#include "models.h"

ggml_cgraph * clip_graph_whisper_enc::build() {
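    // Graph outline (a reading of the code below, not an authoritative spec):
    //   1) conv1d front-end over the mel-spectrogram frames (second conv has stride 2, so n_pos = n_frames / 2)
    //   2) transformer encoder layers, built via the shared build_vit() helper
    //   3) a projector (selected by proj_type) that maps encoder output into the LLM embedding space
    // cb() calls appear to tag intermediate tensors for naming/debugging.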
    const int n_frames = img.nx;
    const int n_pos = n_frames / 2;
    GGML_ASSERT(model.position_embeddings->ne[1] >= n_pos);
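
    // raw input: presumably the 1-channel spectrogram, n_frames wide along the time axis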
    ggml_tensor * inp = build_inp_raw(1);

    // conv1d block
    {
        // convolution + gelu
        ggml_tensor * cur = ggml_conv_1d_ph(ctx0, model.conv1d_1_w, inp, 1, 1);
        cur = ggml_add(ctx0, cur, model.conv1d_1_b);
        cur = ggml_gelu_erf(ctx0, cur);
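
        // second conv uses stride 2, halving the time dimension (hence n_pos = n_frames / 2)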
        cur = ggml_conv_1d_ph(ctx0, model.conv1d_2_w, cur, 2, 1);
        cur = ggml_add(ctx0, cur, model.conv1d_2_b);
        cur = ggml_gelu_erf(ctx0, cur);

        // transpose
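        // swap time and channel dims so each position's embedding is contiguous ([n_embd, n_pos]),
        // which appears to be the layout build_vit() expects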
        inp = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
        cb(inp, "after_conv1d", -1);
    }

    // sanity check (only check one layer, but it should be the same for all)
    GGML_ASSERT(model.layers[0].ln_1_w && model.layers[0].ln_1_b);
    GGML_ASSERT(model.layers[0].ln_2_w && model.layers[0].ln_2_b);
    GGML_ASSERT(model.layers[0].q_b);
    GGML_ASSERT(model.layers[0].v_b);
    GGML_ASSERT(!model.layers[0].k_b); // no bias for k
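
    // slice out the first n_pos rows of the pretrained position-embedding table
    // (Whisper trains a fixed-size table, typically 1500 positions; shorter audio uses a prefix of it)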
    ggml_tensor * pos_embd_selected = ggml_view_2d(
        ctx0, model.position_embeddings,
        model.position_embeddings->ne[0], n_pos,
        model.position_embeddings->nb[1], 0
    );
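
    // run the transformer encoder stack; build_vit() is shared with the vision path,
    // used here with NORM_TYPE_NORMAL (regular layernorm) and the FFN activation from hparams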
    ggml_tensor * cur = build_vit(
                            inp, n_pos,
                            NORM_TYPE_NORMAL,
                            hparams.ffn_op,
                            pos_embd_selected,
                            nullptr);

    cb(cur, "after_transformer", -1);

    if (model.audio_has_stack_frames()) {
        // StackAudioFrames
        // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py
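        // concatenates proj_stack_factor consecutive frames along the embedding dimension,
        // shortening the sequence length by the same factor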
        cur = build_stack(cur, hparams.proj_stack_factor, n_embd);
        cb(cur, "after_stacked", -1);
    }

    if (proj_type == PROJECTOR_TYPE_ULTRAVOX) {
        // UltravoxProjector
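        // pipeline: RMSNorm -> linear -> SwiGLU -> RMSNorm -> linear (see ultravox_model.py)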
        // pre-norm
        cur = ggml_rms_norm(ctx0, cur, 1e-6);
        cur = ggml_mul(ctx0, cur, model.mm_norm_pre_w);

        // ffn in
        cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);

        // swiglu
        // see SwiGLU in ultravox_model.py: silu is applied to the second half, not the first
        cur = ggml_swiglu_swapped(ctx0, cur);

        // mid-norm
        cur = ggml_rms_norm(ctx0, cur, 1e-6);
        cur = ggml_mul(ctx0, cur, model.mm_norm_mid_w);

        // ffn out
        cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);

    } else if (proj_type == PROJECTOR_TYPE_QWEN2A) {
        // projector
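        // single linear layer with bias (no activation)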
        cur = ggml_mul_mat(ctx0, model.mm_fc_w, cur);
        cur = ggml_add(ctx0, cur, model.mm_fc_b);

    } else if (proj_type == PROJECTOR_TYPE_VOXTRAL) {
        // projector
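        // two-layer MLP with GELU (erf) activation, no normalization in between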
        cur = build_ffn(cur,
            model.mm_1_w, model.mm_1_b,
            nullptr, nullptr,
            model.mm_2_w, model.mm_2_b,
            FFN_GELU_ERF,
            -1);

    } else if (proj_type == PROJECTOR_TYPE_GLMA) {
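        // GLM audio adapter: layernorm (with bias) -> stack frames -> FFN,
        // then wrap the sequence with the learned mm_boi / mm_eoi embeddings
        // (presumably begin/end-of-audio markers) along the position dim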
        cur = ggml_norm(ctx0, cur, hparams.eps);
        cur = ggml_mul(ctx0, cur, model.mm_norm_pre_w);
        cur = ggml_add(ctx0, cur, model.mm_norm_pre_b);
        cur = build_stack(cur, hparams.proj_stack_factor, n_embd);
        cur = build_ffn(cur, model.mm_1_w, model.mm_1_b, nullptr, nullptr, model.mm_2_w, model.mm_2_b, hparams.ffn_op, 0);
        cur = ggml_concat(ctx0, model.mm_boi, cur, 1);
        cur = ggml_concat(ctx0, cur, model.mm_eoi, 1);
    } else {
        GGML_ABORT("%s: unknown projector type", __func__);
    }

    cb(cur, "projected", -1);
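
    // expand the forward graph so the projected embeddings are computed as the graph output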
    ggml_build_forward_expand(gf, cur);

    return gf;
}