// clip-model.h
#pragma once

#include "ggml.h"
#include "clip.h"
#include "clip-impl.h"

#include <array>
#include <vector>
#include <unordered_set>
#include <cstdint>
#include <cmath>

enum ffn_op_type {
    FFN_GELU,
    FFN_GELU_ERF,
    FFN_SILU,
    FFN_GELU_QUICK,
};

enum norm_type {
    NORM_TYPE_NORMAL,
    NORM_TYPE_RMS,
};

enum patch_merge_type {
    PATCH_MERGE_FLAT,
    PATCH_MERGE_SPATIAL_UNPAD,
};

struct clip_hparams {
    int32_t image_size = 0;
    int32_t patch_size = 0;
    int32_t n_embd = 0;
    int32_t n_ff = 0;
    int32_t projection_dim = 0;
    int32_t n_head = 0;
    int32_t n_layer = 0;

    // idefics3
    int32_t image_longest_edge = 0;
    int32_t image_min_pixels = -1;
    int32_t image_max_pixels = -1;

    int32_t n_merge = 0; // number of patch merges **per-side**

    float image_mean[3];
    float image_std[3];

    // for models using dynamic image size, we need a smaller image size to warm up,
    // otherwise the user will get an OOM every time they load the model
    int32_t warmup_image_size = 0;
    int32_t warmup_audio_size = 3000;

    ffn_op_type ffn_op = FFN_GELU;

    patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;

    float eps = 1e-6;
    float rope_theta = 0.0;

    std::vector<clip_image_size> image_res_candidates; // for llava-uhd style models
    int32_t image_crop_resolution;
    std::unordered_set<int32_t> vision_feature_layer;
    int32_t attn_window_size = 0;
    int32_t n_wa_pattern = 0;

    // audio
    int32_t n_mel_bins = 0;        // whisper preprocessor
    int32_t proj_stack_factor = 0; // ultravox

    // audio-to-mel preprocessor params
    int32_t audio_chunk_len = -1; // in seconds
    int32_t audio_sample_rate = -1;
    int32_t audio_n_fft = -1;
    int32_t audio_window_len = -1;
    int32_t audio_hop_len = -1;

    // legacy
    bool has_llava_projector = false;
    int minicpmv_version = 0;
    int32_t minicpmv_query_num = 0; // MiniCPM-V query number

    // custom values provided by the user; can be undefined if not set
    int32_t custom_image_min_tokens = -1;
    int32_t custom_image_max_tokens = -1;

    void set_limit_image_tokens(int n_tokens_min, int n_tokens_max) {
        const int cur_merge  = n_merge == 0 ? 1 : n_merge;
        const int patch_area = patch_size * patch_size * cur_merge * cur_merge;
        image_min_pixels  = (custom_image_min_tokens > 0 ? custom_image_min_tokens : n_tokens_min) * patch_area;
        image_max_pixels  = (custom_image_max_tokens > 0 ? custom_image_max_tokens : n_tokens_max) * patch_area;
        warmup_image_size = static_cast<int>(std::sqrt(image_max_pixels));
    }

    void set_warmup_n_tokens(int n_tokens) {
        int n_tok_per_side = static_cast<int>(std::sqrt(n_tokens));
        GGML_ASSERT(n_tok_per_side * n_tok_per_side == n_tokens && "n_tokens must be n*n");
        const int cur_merge = n_merge == 0 ? 1 : n_merge;
        warmup_image_size = n_tok_per_side * patch_size * cur_merge;
        // TODO: support warmup size for custom token numbers
    }
};
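
// Worked example of the token/pixel arithmetic above (hypothetical values, for illustration
// only): with patch_size = 14 and n_merge = 2, one output token covers a 28x28 pixel area,
// so patch_area = 14 * 14 * 2 * 2 = 784. Calling set_limit_image_tokens(256, 1024) then gives
// image_min_pixels = 256 * 784 = 200704 and image_max_pixels = 1024 * 784 = 802816, with
// warmup_image_size = (int) sqrt(802816) = 896. Likewise, set_warmup_n_tokens(1024) yields
// n_tok_per_side = 32 and warmup_image_size = 32 * 14 * 2 = 896.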

struct clip_layer {
    // attention
    ggml_tensor * k_w = nullptr;
    ggml_tensor * k_b = nullptr;
    ggml_tensor * q_w = nullptr;
    ggml_tensor * q_b = nullptr;
    ggml_tensor * v_w = nullptr;
    ggml_tensor * v_b = nullptr;

    ggml_tensor * qkv_w = nullptr;
    ggml_tensor * qkv_b = nullptr;

    ggml_tensor * o_w = nullptr;
    ggml_tensor * o_b = nullptr;

    ggml_tensor * k_norm = nullptr;
    ggml_tensor * q_norm = nullptr;

    // layernorm 1
    ggml_tensor * ln_1_w = nullptr;
    ggml_tensor * ln_1_b = nullptr;

    ggml_tensor * ff_up_w = nullptr;
    ggml_tensor * ff_up_b = nullptr;
    ggml_tensor * ff_gate_w = nullptr;
    ggml_tensor * ff_gate_b = nullptr;
    ggml_tensor * ff_down_w = nullptr;
    ggml_tensor * ff_down_b = nullptr;

    // layernorm 2
    ggml_tensor * ln_2_w = nullptr;
    ggml_tensor * ln_2_b = nullptr;

    // layer scale (no bias)
    ggml_tensor * ls_1_w = nullptr;
    ggml_tensor * ls_2_w = nullptr;

    // qwen3vl deepstack merger
    ggml_tensor * deepstack_norm_w = nullptr;
    ggml_tensor * deepstack_norm_b = nullptr;
    ggml_tensor * deepstack_fc1_w = nullptr;
    ggml_tensor * deepstack_fc1_b = nullptr;
    ggml_tensor * deepstack_fc2_w = nullptr;
    ggml_tensor * deepstack_fc2_b = nullptr;

    // lfm2
    ggml_tensor * ff_norm_w = nullptr;
    ggml_tensor * ff_norm_b = nullptr;
    ggml_tensor * ff_norm_1_w = nullptr;
    ggml_tensor * ff_norm_1_b = nullptr;
    ggml_tensor * ff_up_1_w = nullptr;
    ggml_tensor * ff_up_1_b = nullptr;
    ggml_tensor * ff_down_1_w = nullptr;
    ggml_tensor * ff_down_1_b = nullptr;
    ggml_tensor * pos_bias_u = nullptr;
    ggml_tensor * pos_bias_v = nullptr;
    ggml_tensor * norm_conv_w = nullptr;
    ggml_tensor * norm_conv_b = nullptr;
    ggml_tensor * linear_pos_w = nullptr;
    ggml_tensor * conv_norm_w = nullptr;
    ggml_tensor * conv_norm_b = nullptr;
    ggml_tensor * conv_dw_w = nullptr;
    ggml_tensor * conv_dw_b = nullptr;
    ggml_tensor * conv_pw1_w = nullptr;
    ggml_tensor * conv_pw1_b = nullptr;
    ggml_tensor * conv_pw2_w = nullptr;
    ggml_tensor * conv_pw2_b = nullptr;

    bool has_deepstack() const {
        return deepstack_fc1_w != nullptr;
    }
};

struct clip_model {
    clip_modality modality = CLIP_MODALITY_VISION;
    projector_type proj_type = PROJECTOR_TYPE_MLP;

    clip_hparams hparams;

    // embeddings
    ggml_tensor * class_embedding = nullptr;
    ggml_tensor * patch_embeddings_0 = nullptr;
    ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along the temporal dimension (Qwen2VL)
    ggml_tensor * patch_bias = nullptr;
    ggml_tensor * position_embeddings = nullptr;

    ggml_tensor * norm_embd_w = nullptr;
    ggml_tensor * norm_embd_b = nullptr;

    ggml_tensor * pre_ln_w = nullptr;
    ggml_tensor * pre_ln_b = nullptr;

    std::vector<clip_layer> layers;
    int32_t n_deepstack_layers = 0; // used by Qwen3-VL, calculated from clip_layer

    ggml_tensor * post_ln_w = nullptr;
    ggml_tensor * post_ln_b = nullptr;

    ggml_tensor * projection = nullptr; // TODO: rename it to fc (fully connected layer)
    ggml_tensor * mm_fc_w = nullptr;
    ggml_tensor * mm_fc_b = nullptr;

    ggml_tensor * mm_ffn_up_w = nullptr;
    ggml_tensor * mm_ffn_up_b = nullptr;
    ggml_tensor * mm_ffn_gate_w = nullptr;
    ggml_tensor * mm_ffn_gate_b = nullptr;
    ggml_tensor * mm_ffn_down_w = nullptr;
    ggml_tensor * mm_ffn_down_b = nullptr;
    ggml_tensor * mm_post_norm_w = nullptr;
    ggml_tensor * mm_post_norm_b = nullptr;

    // LLaVA projection
    ggml_tensor * mm_input_norm_w = nullptr;
    ggml_tensor * mm_input_norm_b = nullptr;
    ggml_tensor * mm_0_w = nullptr;
    ggml_tensor * mm_0_b = nullptr;
    ggml_tensor * mm_2_w = nullptr;
    ggml_tensor * mm_2_b = nullptr;

    ggml_tensor * image_newline = nullptr;

    // Yi-type models with mlp+normalization projection
    ggml_tensor * mm_1_w = nullptr; // Yi-type models have 0, 1, 3, 4
    ggml_tensor * mm_1_b = nullptr;
    ggml_tensor * mm_3_w = nullptr;
    ggml_tensor * mm_3_b = nullptr;
    ggml_tensor * mm_4_w = nullptr;
    ggml_tensor * mm_4_b = nullptr;

    // GLMV-Edge projection
    ggml_tensor * mm_model_adapter_conv_w = nullptr;
    ggml_tensor * mm_model_adapter_conv_b = nullptr;

    // MobileVLM projection
    ggml_tensor * mm_model_mlp_1_w = nullptr;
    ggml_tensor * mm_model_mlp_1_b = nullptr;
    ggml_tensor * mm_model_mlp_3_w = nullptr;
    ggml_tensor * mm_model_mlp_3_b = nullptr;
    ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;

    // MobileVLM_V2 projection
    ggml_tensor * mm_model_mlp_0_w = nullptr;
    ggml_tensor * mm_model_mlp_0_b = nullptr;
    ggml_tensor * mm_model_mlp_2_w = nullptr;
    ggml_tensor * mm_model_mlp_2_b = nullptr;
    ggml_tensor * mm_model_peg_0_w = nullptr;
    ggml_tensor * mm_model_peg_0_b = nullptr;

    // MINICPMV projection
    ggml_tensor * mm_model_pos_embed_k = nullptr;
    ggml_tensor * mm_model_query = nullptr;
    ggml_tensor * mm_model_proj = nullptr;
    ggml_tensor * mm_model_kv_proj = nullptr;
    ggml_tensor * mm_model_attn_q_w = nullptr;
    ggml_tensor * mm_model_attn_q_b = nullptr;
    ggml_tensor * mm_model_attn_k_w = nullptr;
    ggml_tensor * mm_model_attn_k_b = nullptr;
    ggml_tensor * mm_model_attn_v_w = nullptr;
    ggml_tensor * mm_model_attn_v_b = nullptr;
    ggml_tensor * mm_model_attn_o_w = nullptr;
    ggml_tensor * mm_model_attn_o_b = nullptr;
    ggml_tensor * mm_model_ln_q_w = nullptr;
    ggml_tensor * mm_model_ln_q_b = nullptr;
    ggml_tensor * mm_model_ln_kv_w = nullptr;
    ggml_tensor * mm_model_ln_kv_b = nullptr;
    ggml_tensor * mm_model_ln_post_w = nullptr;
    ggml_tensor * mm_model_ln_post_b = nullptr;

    // gemma3
    ggml_tensor * mm_input_proj_w = nullptr;
    ggml_tensor * mm_soft_emb_norm_w = nullptr;

    // pixtral, glm4v
    ggml_tensor * token_embd_img_break = nullptr;
    ggml_tensor * mm_patch_merger_w = nullptr;
    ggml_tensor * mm_patch_merger_b = nullptr;

    // ultravox / whisper encoder
    ggml_tensor * conv1d_1_w = nullptr;
    ggml_tensor * conv1d_1_b = nullptr;
    ggml_tensor * conv1d_2_w = nullptr;
    ggml_tensor * conv1d_2_b = nullptr;
    ggml_tensor * mm_norm_pre_w = nullptr;
    ggml_tensor * mm_norm_pre_b = nullptr;
    ggml_tensor * mm_norm_mid_w = nullptr;

    // cogvlm
    ggml_tensor * mm_post_fc_norm_w = nullptr;
    ggml_tensor * mm_post_fc_norm_b = nullptr;
    ggml_tensor * mm_h_to_4h_w = nullptr;
    ggml_tensor * mm_gate_w = nullptr;
    ggml_tensor * mm_4h_to_h_w = nullptr;
    ggml_tensor * mm_boi = nullptr;
    ggml_tensor * mm_eoi = nullptr;

    // lfm2 audio
    std::array<ggml_tensor *, 7> pre_encode_conv_X_w = {nullptr};
    std::array<ggml_tensor *, 7> pre_encode_conv_X_b = {nullptr};
    ggml_tensor * pre_encode_out_w = nullptr;
    ggml_tensor * pre_encode_out_b = nullptr;

    bool audio_has_avgpool() const {
        return proj_type == PROJECTOR_TYPE_QWEN2A
            || proj_type == PROJECTOR_TYPE_VOXTRAL;
    }

    bool audio_has_stack_frames() const {
        return proj_type == PROJECTOR_TYPE_ULTRAVOX
            || proj_type == PROJECTOR_TYPE_VOXTRAL;
    }
};
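
// Note: n_deepstack_layers is documented above as being calculated from the loaded clip_layer
// entries. A hedged sketch of how that count could be derived (illustration only, not
// necessarily how the loader actually does it):
//
//   int32_t n_deepstack = 0;
//   for (const auto & layer : model.layers) {
//       if (layer.has_deepstack()) {
//           n_deepstack++;
//       }
//   }
//   model.n_deepstack_layers = n_deepstack;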

const clip_hparams * clip_get_hparams(const struct clip_ctx * ctx);
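
// Minimal usage sketch (assumes `ctx` is an already-initialized clip_ctx obtained via the
// loader API declared in clip.h):
//
//   const clip_hparams * hp = clip_get_hparams(ctx);
//   const int n_patches_per_side = hp->image_size / hp->patch_size;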