// mtmd.cpp

#include "clip.h"
#include "clip-impl.h"
#include "mtmd.h"
#include "mtmd-audio.h"

#include "llama.h"

#include <algorithm>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <memory>  // std::unique_ptr used below
#include <string>  // std::string used below
#include <vector>
// represents raw image data, layout is RGBRGBRGB...
// length of data must be nx * ny * 3
struct mtmd_bitmap {
    uint32_t nx;
    uint32_t ny;
    std::vector<unsigned char> data;
    std::string id; // optional user-defined id, for ex: can be set to image hash, useful for KV cache tracking
    bool is_audio = false; // true if the bitmap is audio
};

struct mtmd_image_tokens {
    uint32_t nx; // number of tokens in x direction
    uint32_t ny; // number of tokens in y direction
    bool use_mrope_pos = false; // use M-RoPE position counting (the whole image is 1 temporal position)
    uint32_t n_tokens() const { return nx * ny; }
    clip_image_f32_batch batch_f32; // preprocessed image patches
    std::string id; // optional user-defined ID, useful for KV cache tracking

    mtmd_image_tokens clone() {
        return mtmd_image_tokens{
            nx,
            ny,
            use_mrope_pos,
            batch_f32.clone(),
            id
        };
    }
};

using mtmd_image_tokens_ptr = std::unique_ptr<mtmd_image_tokens>;

struct mtmd_audio_tokens {
    uint32_t n_tokens; // number of tokens
    clip_image_f32_batch batch_f32; // preprocessed audio data (mel spectrogram chunks)
    std::string id; // optional user-defined ID, useful for KV cache tracking

    mtmd_audio_tokens clone() {
        return mtmd_audio_tokens{
            n_tokens,
            batch_f32.clone(),
            id
        };
    }
};

using mtmd_audio_tokens_ptr = std::unique_ptr<mtmd_audio_tokens>;
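
// a single chunk of tokenized input; exactly one of tokens_text / tokens_image /
// tokens_audio is populated, depending on `type`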
struct mtmd_input_chunk {
    mtmd_input_chunk_type type;
    std::vector<llama_token> tokens_text;
    mtmd_image_tokens_ptr tokens_image;
    mtmd_audio_tokens_ptr tokens_audio;
};

struct mtmd_input_chunks {
    std::vector<mtmd_input_chunk> entries;
};

// slice template, used by some llava-uhd models to correctly place the special tokens around image embeddings
// models not having it (llava-1.6) will process embeddings without any special tokens in-between
enum mtmd_slice_tmpl {
    MTMD_SLICE_TMPL_NONE,
    MTMD_SLICE_TMPL_MINICPMV_2_5,
    MTMD_SLICE_TMPL_MINICPMV_2_6,
    MTMD_SLICE_TMPL_LLAMA4,
    // TODO @ngxson : add support for idefics (SmolVLM)
};

const char * mtmd_default_marker() {
    return "<__media__>";
}

mtmd_context_params mtmd_context_params_default() {
    mtmd_context_params params;
    params.use_gpu = true;
    params.print_timings = true;
    params.n_threads = 4;
    params.verbosity = GGML_LOG_LEVEL_INFO;
    params.image_marker = MTMD_DEFAULT_IMAGE_MARKER;
    params.media_marker = mtmd_default_marker();
    return params;
}

struct mtmd_context {
    struct clip_ctx * ctx_v; // vision
    struct clip_ctx * ctx_a; // audio

    const struct llama_model * text_model;
    std::vector<float> image_embd_v; // image embedding vector

    bool print_timings;
    int n_threads;
    std::string media_marker;
    const int n_embd_text;

    // these are not tokens, but strings used to mark the beginning and end of image/audio embeddings
    std::string img_beg;
    std::string img_end;
    std::string aud_beg;
    std::string aud_end;

    // for llava-uhd style models, we need special tokens in-between slices
    // minicpmv calls them "slices", llama 4 calls them "tiles"
    mtmd_slice_tmpl slice_tmpl    = MTMD_SLICE_TMPL_NONE;
    llama_token tok_ov_img_start  = LLAMA_TOKEN_NULL; // overview image
    llama_token tok_ov_img_end    = LLAMA_TOKEN_NULL; // overview image
    llama_token tok_slices_start  = LLAMA_TOKEN_NULL; // start of all slices
    llama_token tok_slices_end    = LLAMA_TOKEN_NULL; // end of all slices
    llama_token tok_sli_img_start = LLAMA_TOKEN_NULL; // single slice start
    llama_token tok_sli_img_end   = LLAMA_TOKEN_NULL; // single slice end
    llama_token tok_sli_img_mid   = LLAMA_TOKEN_NULL; // between 2 slices
    llama_token tok_row_end       = LLAMA_TOKEN_NULL; // end of row
    bool tok_row_end_trail = false;
    bool ov_img_first      = false;

    bool use_mrope = false; // for Qwen2VL, we need to use M-RoPE

    // for whisper, we pre-calculate the mel filter bank
    whisper_preprocessor::whisper_filters w_filters;

    // TODO @ngxson : add timings
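
    // the constructor loads the mmproj (CLIP) model, checks that the projector output
    // size matches the text model's embedding size, then initializes the per-modality
    // settings (vision slice template and markers, audio mel filters)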
    mtmd_context(const char * mmproj_fname,
                 const llama_model * text_model,
                 const mtmd_context_params & ctx_params) :
        text_model   (text_model),
        print_timings(ctx_params.print_timings),
        n_threads    (ctx_params.n_threads),
        media_marker (ctx_params.media_marker),
        n_embd_text  (llama_model_n_embd(text_model))
    {
        if (std::string(ctx_params.image_marker) != MTMD_DEFAULT_IMAGE_MARKER) {
            throw std::runtime_error("custom image_marker is not supported anymore, use media_marker instead");
        }
        if (media_marker.empty()) {
            throw std::runtime_error("media_marker must not be empty");
        }

        clip_context_params ctx_clip_params;
        ctx_clip_params.use_gpu   = ctx_params.use_gpu;
        ctx_clip_params.verbosity = ctx_params.verbosity;
        auto res = clip_init(mmproj_fname, ctx_clip_params);
        ctx_v = res.ctx_v;
        ctx_a = res.ctx_a;
        if (!ctx_v && !ctx_a) {
            throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname));
        }

        // if both vision and audio mmproj are present, we need to validate their n_embd
        if (ctx_v && ctx_a) {
            int n_embd_v = clip_n_mmproj_embd(ctx_v);
            int n_embd_a = clip_n_mmproj_embd(ctx_a);
            if (n_embd_v != n_embd_a) {
                throw std::runtime_error(string_format(
                    "mismatch between vision and audio mmproj (n_embd_v = %d, n_embd_a = %d)\n",
                    n_embd_v, n_embd_a));
            }
        }

        // since we already validate n_embd of vision and audio mmproj,
        // we can safely assume that they are the same
        int n_embd_clip = clip_n_mmproj_embd(ctx_v ? ctx_v : ctx_a);
        if (n_embd_text != n_embd_clip) {
            throw std::runtime_error(string_format(
                "mismatch between text model (n_embd = %d) and mmproj (n_embd = %d)\n"
                "hint: you may be using wrong mmproj\n",
                n_embd_text, n_embd_clip));
        }

        if (ctx_v) {
            init_vision();
        }
        if (ctx_a) {
            init_audio();
        }
    }
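
    // init_vision() selects the slice template and looks up the special tokens and
    // marker strings that surround image embeddings, based on the projector type
    // stored in the vision mmproj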
    void init_vision() {
        GGML_ASSERT(ctx_v != nullptr);
        use_mrope = clip_is_qwen2vl(ctx_v);

        projector_type proj  = clip_get_projector_type(ctx_v);
        int minicpmv_version = clip_is_minicpmv(ctx_v);
        if (minicpmv_version == 2) {
            // minicpmv 2.5 format:
            // <image> (overview) </image><slice><image> (slice) </image><image> (slice) </image>\n ... </slice>
            slice_tmpl        = MTMD_SLICE_TMPL_MINICPMV_2_5;
            tok_ov_img_start  = lookup_token("<image>");
            tok_ov_img_end    = lookup_token("</image>");
            tok_slices_start  = lookup_token("<slice>");
            tok_slices_end    = lookup_token("</slice>");
            tok_sli_img_start = tok_ov_img_start;
            tok_sli_img_end   = tok_ov_img_end;
            tok_row_end       = lookup_token("\n");
            tok_row_end_trail = false; // no trailing end-of-row token
            ov_img_first      = true;

        } else if (minicpmv_version == 3 || minicpmv_version == 4) {
            // minicpmv 2.6 format:
            // <image> (overview) </image><slice> (slice) </slice><slice> (slice) </slice>\n ...
            slice_tmpl        = MTMD_SLICE_TMPL_MINICPMV_2_6;
            tok_ov_img_start  = lookup_token("<image>");
            tok_ov_img_end    = lookup_token("</image>");
            tok_sli_img_start = lookup_token("<slice>");
            tok_sli_img_end   = lookup_token("</slice>");
            tok_row_end       = lookup_token("\n");
            tok_row_end_trail = false; // no trailing end-of-row token
            ov_img_first      = true;

        } else if (minicpmv_version != 0) {
            GGML_ASSERT(false && "unsupported minicpmv version");

        } else if (proj == PROJECTOR_TYPE_LLAMA4) {
            // llama 4 format:
            // <|image_start|>
            //     (slice) <|tile_x_separator|> (slice) <|tile_x_separator|> ... <|tile_y_separator|>
            //     (slice) <|tile_x_separator|> (slice) <|tile_x_separator|> ... <|tile_y_separator|>
            //     ... <|tile_y_separator|>   <-- trailing end-of-row token
            //     <|image|> (overview)       <-- overview image is last
            // <|image_end|>
            slice_tmpl        = MTMD_SLICE_TMPL_LLAMA4;
            tok_ov_img_start  = lookup_token("<|image|>");
            tok_sli_img_mid   = lookup_token("<|tile_x_separator|>");
            tok_row_end       = lookup_token("<|tile_y_separator|>");
            tok_row_end_trail = true;  // add trailing end-of-row token
            ov_img_first      = false; // overview image is last
        }

        // set boi/eoi
        if (proj == PROJECTOR_TYPE_GEMMA3) {
            // <start_of_image> ... (image embeddings) ... <end_of_image>
            img_beg = "<start_of_image>";
            img_end = "<end_of_image>";

        } else if (proj == PROJECTOR_TYPE_IDEFICS3) {
            // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215
            img_beg = "<fake_token_around_image><global-img>";
            img_end = "<fake_token_around_image>";

        } else if (proj == PROJECTOR_TYPE_PIXTRAL) {
            // https://github.com/huggingface/transformers/blob/1cd110c6cb6a6237614130c470e9a902dbc1a4bd/docs/source/en/model_doc/pixtral.md
            img_end = "[IMG_END]";

        } else if (proj == PROJECTOR_TYPE_QWEN2VL || proj == PROJECTOR_TYPE_QWEN25VL) {
            // <|vision_start|> ... (image embeddings) ... <|vision_end|>
            img_beg = "<|vision_start|>";
            img_end = "<|vision_end|>";

        } else if (proj == PROJECTOR_TYPE_LLAMA4) {
            // (more details in mtmd_context constructor)
            img_beg = "<|image_start|>";
            img_end = "<|image_end|>";
            LOG_WRN("%s: llama 4 vision is known to have degraded quality:\n"
                    "    https://github.com/ggml-org/llama.cpp/pull/13282\n", __func__);

        } else if (proj == PROJECTOR_TYPE_INTERNVL) {
            // <img> ... (image embeddings) ... </img>
            img_beg = "<img>";
            img_end = "</img>";
        }
    }

    void init_audio() {
        GGML_ASSERT(ctx_a != nullptr);
        projector_type proj = clip_get_projector_type(ctx_a);

        if (clip_has_whisper_encoder(ctx_a)) {
            // TODO @ngxson : check if model n_mel is 128 or 80
            w_filters = whisper_precalc_filters::get_128_bins();
        }

        LOG_WRN("%s: audio input is in experimental stage and may have reduced quality:\n"
                "    https://github.com/ggml-org/llama.cpp/discussions/13759\n", __func__);

        if (proj == PROJECTOR_TYPE_QWEN2A) {
            // <|audio_bos|> ... (embeddings) ... <|audio_eos|>
            aud_beg = "<|audio_bos|>";
            aud_end = "<|audio_eos|>";
        }
    }

    // get clip ctx based on chunk type
    clip_ctx * get_clip_ctx(const mtmd_input_chunk * chunk) const {
        if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
            return ctx_v;
        } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
            return ctx_a;
        }
        GGML_ABORT("unknown chunk type");
    }

    projector_type proj_type_v() const {
        return ctx_v ? clip_get_projector_type(ctx_v) : PROJECTOR_TYPE_UNKNOWN;
    }

    projector_type proj_type_a() const {
        return ctx_a ? clip_get_projector_type(ctx_a) : PROJECTOR_TYPE_UNKNOWN;
    }

    ~mtmd_context() {
        clip_free(ctx_a);
        clip_free(ctx_v);
    }

private:
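    // find a token by its text form via a linear scan over the vocab;
    // returns LLAMA_TOKEN_NULL if no token detokenizes to exactly `token_text`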
    llama_token lookup_token(const std::string & token_text) {
        const llama_vocab * vocab = llama_model_get_vocab(text_model);
        const int n_vocab = llama_vocab_n_tokens(vocab);
        for (int i = 0; i < n_vocab; i++) {
            if (token_to_piece(vocab, i, true) == token_text) {
                return i;
            }
        }
        return LLAMA_TOKEN_NULL;
    }
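
    // detokenize a single token using the usual two-pass pattern: try with the string's
    // small internal buffer first; if llama_token_to_piece reports a larger size
    // (negative return value), resize and retry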
    std::string token_to_piece(const llama_vocab * vocab, llama_token token, bool special) {
        std::string piece;
        piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\0'
        const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
        if (n_chars < 0) {
            piece.resize(-n_chars);
            int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
            GGML_ASSERT(check == -n_chars);
        } else {
            piece.resize(n_chars);
        }
        return piece;
    }
};

mtmd_context * mtmd_init_from_file(const char * mmproj_fname,
                                   const struct llama_model * text_model,
                                   const struct mtmd_context_params ctx_params) {
    try {
        return new mtmd_context(mmproj_fname, text_model, ctx_params);
    } catch (const std::exception & e) {
        LOG_ERR("%s: error: %s\n", __func__, e.what());
        return nullptr;
    }
}

void mtmd_free(mtmd_context * ctx) {
    if (ctx) {
        delete ctx;
    }
}

struct mtmd_tokenizer {
    mtmd_context * ctx;
    std::vector<const mtmd_bitmap *> bitmaps;

    std::string input_text;
    bool add_special;
    bool parse_special;
    const llama_vocab * vocab;

    mtmd_input_chunks cur;

    mtmd_tokenizer(mtmd_context * ctx,
                   const mtmd_input_text * text,
                   const mtmd_bitmap ** bitmaps,
                   size_t n_bitmaps) : ctx(ctx), bitmaps(bitmaps, bitmaps + n_bitmaps) {
        add_special   = text->add_special;
        parse_special = text->parse_special;
        input_text    = text->text;
        vocab         = llama_model_get_vocab(ctx->text_model);

        // for compatibility, we convert image marker to media marker
        string_replace_all(input_text, MTMD_DEFAULT_IMAGE_MARKER, ctx->media_marker);
    }
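
    // main entry point: split the input text on the media marker, emit text chunks for
    // the text parts and image/audio chunks (via add_media) for each marker, then add
    // BOS/EOS if requested and verify that the number of bitmaps matches the markers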
    int32_t tokenize(mtmd_input_chunks * output) {
        cur.entries.clear();
        std::vector<std::string> parts = split_text(input_text, ctx->media_marker);
        size_t i_bm = 0; // index of the current bitmap

        for (auto & part : parts) {
            if (part == ctx->media_marker) {
                // this is a marker, we should add the next bitmap
                if (i_bm >= bitmaps.size()) {
                    LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n",
                            __func__, bitmaps.size(), parts.size() - 1);
                    return 1;
                }
                const mtmd_bitmap * bitmap = bitmaps[i_bm++];
                int32_t res = add_media(bitmap);
                if (res != 0) {
                    return res;
                }
            } else {
                // this is a text part, we should add it as text
                add_text(part, parse_special);
            }
        }

        if (add_special && llama_vocab_get_add_bos(vocab)) {
            // if first chunk is text, we add BOS token to first text chunk
            // otherwise, create a new text chunk with BOS token
            if (!cur.entries.empty() && cur.entries[0].type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
                // add BOS token to the beginning of first text chunk
                cur.entries[0].tokens_text.insert(cur.entries[0].tokens_text.begin(), llama_vocab_bos(vocab));
            } else {
                // create a new text chunk with BOS token at the beginning
                mtmd_input_chunk bos_chunk{
                    MTMD_INPUT_CHUNK_TYPE_TEXT,
                    {llama_vocab_bos(vocab)},
                    nullptr, // image tokens
                    nullptr, // audio tokens
                };
                cur.entries.insert(cur.entries.begin(), std::move(bos_chunk));
            }
        }

        if (add_special && llama_vocab_get_add_eos(vocab)) {
            // if last chunk is text, we add EOS token to it
            add_text({llama_vocab_eos(vocab)});
        }

        if (i_bm != bitmaps.size()) {
            LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n",
                    __func__, bitmaps.size(), parts.size() - 1);
            return 1;
        }

        *output = std::move(cur);
        return 0;
    }

    void add_text(const std::string & txt, bool parse_special) {
        LOG_DBG("%s: %s\n", __func__, txt.c_str());
        auto tokens = mtmd_tokenize_text_internal(vocab, txt, /* add_special */ false, parse_special);
        add_text(tokens);
    }

    void add_text(const std::vector<llama_token> & tokens) {
        if (tokens.empty()) {
            return;
        }
        // if last entry is also a text chunk, add tokens to it instead of creating new chunk
        if (!cur.entries.empty() && cur.entries.back().type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
            cur.entries.back().tokens_text.insert(
                cur.entries.back().tokens_text.end(),
                tokens.begin(),
                tokens.end());
        } else {
            mtmd_input_chunk chunk{
                MTMD_INPUT_CHUNK_TYPE_TEXT,
                tokens,
                nullptr, // image tokens
                nullptr, // audio tokens
            };
            cur.entries.emplace_back(std::move(chunk));
        }
    }
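
    // turn one bitmap into chunk(s):
    //  - images: preprocess into an f32 batch; llava-uhd style models are split into an
    //    overview image plus slices with special tokens in-between, other models produce
    //    a single image chunk
    //  - audio: compute mel spectrograms with the whisper preprocessor and emit one
    //    audio chunk per spectrogram
    // begin/end marker strings (img_beg/img_end, aud_beg/aud_end) are added as text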
    int32_t add_media(const mtmd_bitmap * bitmap) {
        if (!bitmap->is_audio) {
            // handle image

            if (!ctx->ctx_v) {
                LOG_ERR("%s: error: model does not support vision input\n", __func__);
                return 2;
            }

            if (!ctx->img_beg.empty()) {
                add_text(ctx->img_beg, true); // add image begin token
            }

            // convert mtmd_bitmap to clip_image_u8
            clip_image_u8_ptr img_u8(clip_image_u8_init());
            img_u8->nx = bitmap->nx;
            img_u8->ny = bitmap->ny;
            img_u8->buf.resize(bitmap->data.size());
            std::memcpy(img_u8->buf.data(), bitmap->data.data(), img_u8->nx * img_u8->ny * 3);

            // preprocess image
            clip_image_f32_batch batch_f32;
            bool ok = clip_image_preprocess(ctx->ctx_v, img_u8.get(), &batch_f32);
            if (!ok) {
                LOG_ERR("Unable to preprocess image\n");
                return 2;
            }

            // handle llava-uhd style preprocessing
            if (
                ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_5
                || ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_6
                || ctx->slice_tmpl == MTMD_SLICE_TMPL_LLAMA4
            ) {
                // split batch into chunks of single images
                auto chunks = split_batch_to_chunk(std::move(batch_f32), bitmap->id);
                GGML_ASSERT(chunks.size() > 0);

                auto ov_chunk = std::move(chunks.front());
                chunks.erase(chunks.begin());

                // add overview image (first)
                if (ctx->ov_img_first) {
                    if (ctx->tok_ov_img_start != LLAMA_TOKEN_NULL) {
                        add_text({ctx->tok_ov_img_start});
                    }
                    cur.entries.emplace_back(std::move(ov_chunk));
                    if (ctx->tok_ov_img_end != LLAMA_TOKEN_NULL) {
                        add_text({ctx->tok_ov_img_end});
                    }
                }

                // add slices (or tiles)
                if (!chunks.empty()) {
                    const int n_col = batch_f32.grid_x;
                    const int n_row = batch_f32.grid_y;
                    if (ctx->tok_slices_start != LLAMA_TOKEN_NULL) {
                        add_text({ctx->tok_slices_start});
                    }
                    for (int y = 0; y < n_row; y++) {
                        for (int x = 0; x < n_col; x++) {
                            const bool is_last_in_row = (x == n_col - 1);
                            if (ctx->tok_sli_img_start != LLAMA_TOKEN_NULL) {
                                add_text({ctx->tok_sli_img_start});
                            }
                            cur.entries.emplace_back(std::move(chunks[y * n_col + x]));
                            if (ctx->tok_sli_img_end != LLAMA_TOKEN_NULL) {
                                add_text({ctx->tok_sli_img_end});
                            }
                            if (!is_last_in_row && ctx->tok_sli_img_mid != LLAMA_TOKEN_NULL) {
                                add_text({ctx->tok_sli_img_mid});
                            }
                        }
                        if ((y != n_row - 1 || ctx->tok_row_end_trail) && ctx->tok_row_end != LLAMA_TOKEN_NULL) {
                            add_text({ctx->tok_row_end});
                        }
                    }
                    if (ctx->tok_slices_end != LLAMA_TOKEN_NULL) {
                        add_text({ctx->tok_slices_end});
                    }
                }

                // add overview image (last)
                if (!ctx->ov_img_first) {
                    if (ctx->tok_ov_img_start != LLAMA_TOKEN_NULL) {
                        add_text({ctx->tok_ov_img_start});
                    }
                    cur.entries.emplace_back(std::move(ov_chunk));
                    if (ctx->tok_ov_img_end != LLAMA_TOKEN_NULL) {
                        add_text({ctx->tok_ov_img_end});
                    }
                }

            } else {
                size_t n_tokens = 0;
                for (const auto & entry : batch_f32.entries) {
                    n_tokens += clip_n_output_tokens(ctx->ctx_v, entry.get());
                }

                mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
                if (ctx->use_mrope) {
                    // for Qwen2VL, we need this information for M-RoPE decoding positions
                    image_tokens->nx = clip_n_output_tokens_x(ctx->ctx_v, batch_f32.entries[0].get());
                    image_tokens->ny = clip_n_output_tokens_y(ctx->ctx_v, batch_f32.entries[0].get());
                    image_tokens->use_mrope_pos = true;
                } else {
                    // other models, we only need the total number of tokens
                    image_tokens->nx = n_tokens;
                    image_tokens->ny = 1;
                }
                image_tokens->batch_f32 = std::move(batch_f32);
                image_tokens->id = bitmap->id; // optional

                LOG_DBG("image_tokens->nx = %d\n", image_tokens->nx);
                LOG_DBG("image_tokens->ny = %d\n", image_tokens->ny);
                LOG_DBG("batch_f32 size = %d\n", (int)image_tokens->batch_f32.entries.size());

                mtmd_input_chunk chunk{
                    MTMD_INPUT_CHUNK_TYPE_IMAGE,
                    {}, // text tokens
                    std::move(image_tokens),
                    nullptr, // audio tokens
                };
                cur.entries.emplace_back(std::move(chunk));
            }

            if (!ctx->img_end.empty()) {
                add_text(ctx->img_end, true); // add image end token
            }

        } else {
            // handle audio

            if (!ctx->ctx_a) {
                LOG_ERR("%s: error: model does not support audio input\n", __func__);
                return 2;
            }
            if (bitmap->data.size() == 0) {
                LOG_ERR("%s: error: empty audio data\n", __func__);
                return 2;
            }

            if (!ctx->aud_beg.empty()) {
                add_text(ctx->aud_beg, true); // add audio begin token
            }

            // preprocess audio
            GGML_ASSERT(ctx->w_filters.n_mel); // make sure we have filter preloaded
            std::vector<whisper_preprocessor::whisper_mel> mel_spec_chunks;
            const float * samples = (const float *)bitmap->data.data();
            size_t n_samples = bitmap->data.size() / sizeof(float);
            bool ok = whisper_preprocessor::preprocess_audio(samples, n_samples, ctx->w_filters, mel_spec_chunks);
            if (!ok) {
                LOG_ERR("Unable to preprocess audio\n");
                return 2;
            }

            // consider each mel_spec as a separate audio chunk
            // TODO: maybe support batching, but this may come with memory cost
            for (auto & mel_spec : mel_spec_chunks) {
                clip_image_f32_ptr mel_f32(clip_image_f32_init());
                mel_f32->nx  = mel_spec.n_len;
                mel_f32->ny  = mel_spec.n_mel;
                mel_f32->buf = std::move(mel_spec.data);
                size_t n_tokens = clip_n_output_tokens(ctx->ctx_a, mel_f32.get());

                clip_image_f32_batch batch_f32;
                batch_f32.is_audio = true;
                batch_f32.entries.push_back(std::move(mel_f32));

                mtmd_audio_tokens_ptr audio_tokens(new mtmd_audio_tokens);
                audio_tokens->n_tokens  = n_tokens;
                audio_tokens->batch_f32 = std::move(batch_f32);
                audio_tokens->id        = bitmap->id; // optional

                LOG_DBG("audio_tokens->n_tokens = %d\n", audio_tokens->n_tokens);

                mtmd_input_chunk chunk{
                    MTMD_INPUT_CHUNK_TYPE_AUDIO,
                    {}, // text tokens
                    nullptr, // image tokens
                    std::move(audio_tokens),
                };
                cur.entries.emplace_back(std::move(chunk));
            }

            if (!ctx->aud_end.empty()) {
                add_text(ctx->aud_end, true); // add audio end token
            }
        }

        return 0;
    }
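
    // split a preprocessed batch into one image chunk per entry;
    // used by the llava-uhd path above to interleave slices with special tokens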
    std::vector<mtmd_input_chunk> split_batch_to_chunk(clip_image_f32_batch && batch_f32, const std::string & id) {
        std::vector<mtmd_input_chunk> chunks;

        for (auto & entry : batch_f32.entries) {
            mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
            image_tokens->nx = clip_n_output_tokens(ctx->ctx_v, entry.get());
            image_tokens->ny = 1;
            image_tokens->batch_f32.entries.push_back(std::move(entry));
            image_tokens->id = id;

            mtmd_input_chunk chunk{
                MTMD_INPUT_CHUNK_TYPE_IMAGE,
                {}, // text tokens
                std::move(image_tokens),
                nullptr, // audio tokens
            };
            chunks.emplace_back(std::move(chunk));
        }

        return chunks;
    }

    // for example: "a <__media__> b <__media__> c" --> "a", "<__media__>", "b", "<__media__>", "c"
    static std::vector<std::string> split_text(const std::string & input, const std::string & delimiter) {
        std::vector<std::string> result;
        if (input.empty()) {
            return result;
        }
        size_t start = 0;
        size_t pos = 0;
        while ((pos = input.find(delimiter, start)) != std::string::npos) {
            if (pos > start) {
                result.push_back(input.substr(start, pos - start));
            }
            result.push_back(delimiter);
            start = pos + delimiter.length();
        }
        if (start < input.length()) {
            result.push_back(input.substr(start));
        }
        return result;
    }

    // copied from common_tokenize
    static std::vector<llama_token> mtmd_tokenize_text_internal(
            const struct llama_vocab * vocab,
            const std::string & text,
            bool add_special,
            bool parse_special) {
        // upper limit for the number of tokens
        int n_tokens = text.length() + 2 * add_special;
        std::vector<llama_token> result(n_tokens);
        n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
        if (n_tokens < 0) {
            result.resize(-n_tokens);
            int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
            GGML_ASSERT(check == -n_tokens);
        } else {
            result.resize(n_tokens);
        }
        return result;
    }
};

int32_t mtmd_tokenize(mtmd_context * ctx,
                      mtmd_input_chunks * output,
                      const mtmd_input_text * text,
                      const mtmd_bitmap ** bitmaps,
                      size_t n_bitmaps) {
    mtmd_tokenizer tokenizer(ctx, text, bitmaps, n_bitmaps);
    return tokenizer.tokenize(output);
}
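
// encoding: run the vision/audio encoder on a chunk and store the resulting embeddings
// in ctx->image_embd_v (n_tokens * n_embd floats), to be retrieved with
// mtmd_get_output_embd()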
int32_t mtmd_encode_chunk(mtmd_context * ctx, const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        LOG_WRN("mtmd_encode_chunk has no effect for text chunks\n");
        return 0;
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        if (!ctx->ctx_v) {
            LOG_ERR("%s: model does not support vision input\n", __func__);
            return 1;
        }
        return mtmd_encode(ctx, chunk->tokens_image.get());
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        if (!ctx->ctx_a) {
            LOG_ERR("%s: model does not support audio input\n", __func__);
            return 1;
        }
        int n_mmproj_embd = ctx->n_embd_text;
        ctx->image_embd_v.resize(chunk->tokens_audio->n_tokens * n_mmproj_embd);
        bool ok = clip_image_batch_encode(
            ctx->ctx_a,
            ctx->n_threads,
            &chunk->tokens_audio->batch_f32,
            ctx->image_embd_v.data());
        return ok ? 0 : 1;
    }

    LOG_ERR("%s: unknown chunk type %d\n", __func__, (int)chunk->type);
    return 1;
}

int32_t mtmd_encode(mtmd_context * ctx, const mtmd_image_tokens * image_tokens) {
    clip_ctx * ctx_clip = ctx->ctx_v;
    if (!ctx_clip) {
        LOG_ERR("%s: this API does not support non-vision input, please use mtmd_encode_chunk instead\n", __func__);
        return 1;
    }
    int n_mmproj_embd = clip_n_mmproj_embd(ctx_clip);
    ctx->image_embd_v.resize(image_tokens->n_tokens() * n_mmproj_embd);
    bool ok = false;

    if (clip_is_llava(ctx_clip) || clip_is_minicpmv(ctx_clip) || clip_is_glm(ctx_clip)) {
        // TODO @ngxson : llava does not support batched encoding ; this should be fixed inside clip_image_batch_encode()
        const auto & entries = image_tokens->batch_f32.entries;
        for (size_t i = 0; i < entries.size(); i++) {
            int n_tokens_per_image = clip_n_output_tokens(ctx_clip, entries[i].get());
            ok = clip_image_encode(
                ctx_clip,
                ctx->n_threads,
                entries[i].get(),
                ctx->image_embd_v.data() + i*n_mmproj_embd*n_tokens_per_image);
        }
    } else {
        ok = clip_image_batch_encode(
            ctx_clip,
            ctx->n_threads,
            &image_tokens->batch_f32,
            ctx->image_embd_v.data());
    }

    return ok ? 0 : 1;
}
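
// note: the returned pointer points into ctx->image_embd_v, so the data is only valid
// until the next call to mtmd_encode() / mtmd_encode_chunk() on the same context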
float * mtmd_get_output_embd(mtmd_context * ctx) {
    return ctx->image_embd_v.data();
}

bool mtmd_decode_use_non_causal(mtmd_context * ctx) {
    if (ctx->ctx_v && clip_get_projector_type(ctx->ctx_v) == PROJECTOR_TYPE_GEMMA3) {
        return true;
    }
    return false;
}

bool mtmd_decode_use_mrope(mtmd_context * ctx) {
    return ctx->use_mrope;
}

bool mtmd_support_vision(mtmd_context * ctx) {
    return ctx->ctx_v != nullptr;
}

bool mtmd_support_audio(mtmd_context * ctx) {
    return ctx->ctx_a != nullptr;
}

int mtmd_get_audio_bitrate(mtmd_context * ctx) {
    if (!ctx->ctx_a) {
        return -1;
    }
    // for now, we assume that all audio models have the same bitrate
    return 16000; // 16kHz
}

//
// public API functions
//
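
// rough usage sketch (error handling and cleanup calls such as mtmd_bitmap_free /
// mtmd_input_chunks_free / mtmd_free omitted; mtmd_input_text is declared in mtmd.h;
// "mmproj.gguf" is a hypothetical file name):
//
//   mtmd_context_params params = mtmd_context_params_default();
//   mtmd_context * mctx = mtmd_init_from_file("mmproj.gguf", text_model, params);
//
//   mtmd_bitmap * bmp = mtmd_bitmap_init(nx, ny, rgb_pixels); // RGBRGBRGB... data
//
//   mtmd_input_text text;
//   text.text          = "describe this image: <__media__>";
//   text.add_special   = true;
//   text.parse_special = true;
//
//   mtmd_input_chunks * chunks = mtmd_input_chunks_init();
//   mtmd_tokenize(mctx, chunks, &text, (const mtmd_bitmap **) &bmp, 1);
//
//   for (size_t i = 0; i < mtmd_input_chunks_size(chunks); i++) {
//       const mtmd_input_chunk * chunk = mtmd_input_chunks_get(chunks, i);
//       if (mtmd_input_chunk_get_type(chunk) != MTMD_INPUT_CHUNK_TYPE_TEXT) {
//           mtmd_encode_chunk(mctx, chunk);
//           float * embd = mtmd_get_output_embd(mctx); // feed to the text model as embeddings
//       }
//   }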
// mtmd_bitmap

mtmd_bitmap * mtmd_bitmap_init(uint32_t nx,
                               uint32_t ny,
                               const unsigned char * data) {
    mtmd_bitmap * bitmap = new mtmd_bitmap;
    bitmap->nx = nx;
    bitmap->ny = ny;
    size_t data_size = (size_t)nx * ny * 3;
    bitmap->data.resize(data_size);
    std::memcpy(bitmap->data.data(), data, data_size);
    return bitmap;
}

mtmd_bitmap * mtmd_bitmap_init_from_audio(size_t n_samples,
                                          const float * data) {
    mtmd_bitmap * bitmap = new mtmd_bitmap;
    bitmap->nx = n_samples;
    bitmap->ny = 1;
    bitmap->is_audio = true;
    size_t data_size = n_samples * sizeof(float);
    bitmap->data.resize(data_size);
    std::memcpy(bitmap->data.data(), data, data_size);
    return bitmap;
}

uint32_t mtmd_bitmap_get_nx(const mtmd_bitmap * bitmap) {
    return bitmap->nx;
}

uint32_t mtmd_bitmap_get_ny(const mtmd_bitmap * bitmap) {
    return bitmap->ny;
}

const unsigned char * mtmd_bitmap_get_data(const mtmd_bitmap * bitmap) {
    return bitmap->data.data();
}

size_t mtmd_bitmap_get_n_bytes(const mtmd_bitmap * bitmap) {
    return bitmap->data.size();
}

bool mtmd_bitmap_is_audio(const mtmd_bitmap * bitmap) {
    return bitmap->is_audio;
}

const char * mtmd_bitmap_get_id(const mtmd_bitmap * bitmap) {
    return bitmap->id.c_str();
}

void mtmd_bitmap_set_id(mtmd_bitmap * bitmap, const char * id) {
    if (id) {
        bitmap->id = std::string(id);
    } else {
        bitmap->id.clear();
    }
}

void mtmd_bitmap_free(mtmd_bitmap * bitmap) {
    if (bitmap) {
        delete bitmap;
    }
}

// mtmd_input_chunks

mtmd_input_chunks * mtmd_input_chunks_init() {
    return new mtmd_input_chunks;
}

size_t mtmd_input_chunks_size(const mtmd_input_chunks * chunks) {
    return chunks->entries.size();
}

const mtmd_input_chunk * mtmd_input_chunks_get(const mtmd_input_chunks * chunks, size_t idx) {
    if (idx >= chunks->entries.size()) {
        return nullptr;
    }
    return &chunks->entries[idx];
}

void mtmd_input_chunks_free(mtmd_input_chunks * chunks) {
    if (chunks) {
        delete chunks;
    }
}

// mtmd_input_chunk

enum mtmd_input_chunk_type mtmd_input_chunk_get_type(const mtmd_input_chunk * chunk) {
    return chunk->type;
}

const llama_token * mtmd_input_chunk_get_tokens_text(const mtmd_input_chunk * chunk, size_t * n_tokens_output) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        *n_tokens_output = chunk->tokens_text.size();
        return chunk->tokens_text.data();
    }
    *n_tokens_output = 0;
    return nullptr;
}

const mtmd_image_tokens * mtmd_input_chunk_get_tokens_image(const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        return chunk->tokens_image.get();
    }
    return nullptr;
}

size_t mtmd_input_chunk_get_n_tokens(const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        return chunk->tokens_text.size();
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        return mtmd_image_tokens_get_n_tokens(chunk->tokens_image.get());
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        return chunk->tokens_audio->n_tokens;
    } else {
        GGML_ABORT("invalid chunk type");
    }
}
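
// number of KV cache positions the chunk occupies; this differs from the token count
// only for M-RoPE image chunks, where the whole image takes a single temporal position
// (see mtmd_image_tokens_get_n_pos)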
llama_pos mtmd_input_chunk_get_n_pos(const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        return chunk->tokens_text.size();
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        return mtmd_image_tokens_get_n_pos(chunk->tokens_image.get());
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        return chunk->tokens_audio->n_tokens;
    } else {
        GGML_ABORT("invalid chunk type");
    }
}

const char * mtmd_input_chunk_get_id(const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        return chunk->tokens_image->id.c_str();
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        return chunk->tokens_audio->id.c_str();
    }
    return nullptr;
}

mtmd_input_chunk * mtmd_input_chunk_copy(const mtmd_input_chunk * chunk) {
    mtmd_input_chunk * copy = new mtmd_input_chunk{
        chunk->type,
        chunk->tokens_text,
        nullptr,
        nullptr,
    };
    if (chunk->tokens_image) {
        // copy the image tokens
        copy->tokens_image = mtmd_image_tokens_ptr(new mtmd_image_tokens());
        *copy->tokens_image = chunk->tokens_image->clone();
    }
    if (chunk->tokens_audio) {
        // copy the audio tokens
        copy->tokens_audio = mtmd_audio_tokens_ptr(new mtmd_audio_tokens());
        *copy->tokens_audio = chunk->tokens_audio->clone();
    }
    return copy;
}

void mtmd_input_chunk_free(mtmd_input_chunk * chunk) {
    if (chunk) {
        delete chunk;
    }
}

// mtmd_image_tokens

size_t mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens) {
    return image_tokens->n_tokens();
}

size_t mtmd_image_tokens_get_nx(const mtmd_image_tokens * image_tokens) {
    return image_tokens->nx;
}

size_t mtmd_image_tokens_get_ny(const mtmd_image_tokens * image_tokens) {
    return image_tokens->ny;
}

const char * mtmd_image_tokens_get_id(const mtmd_image_tokens * image_tokens) {
    return image_tokens->id.c_str();
}

llama_pos mtmd_image_tokens_get_n_pos(const mtmd_image_tokens * image_tokens) {
    if (image_tokens->use_mrope_pos) {
        return 1; // for M-RoPE, the whole image is 1 in temporal dimension
    }
    return image_tokens->n_tokens();
}

// test function

mtmd_input_chunks * mtmd_test_create_input_chunks() {
    mtmd_input_chunks * chunks = mtmd_input_chunks_init();
    if (!chunks) {
        return nullptr;
    }

    // create a text chunk
    std::vector<llama_token> tokens_text = { 1, 2, 3, 4, 5 };
    mtmd_input_chunk chunk_text{
        MTMD_INPUT_CHUNK_TYPE_TEXT,
        std::move(tokens_text),
        nullptr, // image tokens
        nullptr, // audio tokens
    };
    chunks->entries.emplace_back(std::move(chunk_text));

    // create an image chunk
    mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
    image_tokens->nx = 4;
    image_tokens->ny = 4;
    image_tokens->batch_f32.entries.resize(16);
    image_tokens->id = "image_1";
    mtmd_input_chunk chunk_image{
        MTMD_INPUT_CHUNK_TYPE_IMAGE,
        {}, // text tokens
        std::move(image_tokens),
        nullptr, // audio tokens
    };
    chunks->entries.emplace_back(std::move(chunk_image));

    return chunks;
}