// mtmd.cpp

#include "clip.h"
#include "clip-impl.h"
#include "mtmd.h"
#include "mtmd-audio.h"

#include "llama.h"

// fix problem with std::min and std::max
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#   define NOMINMAX
#endif
#include <windows.h>
#endif

#include <algorithm>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>
// represents raw image data, layout is RGBRGBRGB...
// length of data must be nx * ny * 3
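// for audio input, the same struct is reused: data then holds the raw PCM
// stream as float samples, with nx = number of samples, ny = 1 and
// is_audio = true (see mtmd_bitmap_init_from_audio() below)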
struct mtmd_bitmap {
    uint32_t nx;
    uint32_t ny;
    std::vector<unsigned char> data;
    std::string id; // optional user-defined id, for ex: can be set to image hash, useful for KV cache tracking
    bool is_audio = false; // true if the bitmap is audio
};
struct mtmd_image_tokens {
    uint32_t nx; // number of tokens in x direction
    uint32_t ny; // number of tokens in y direction
    bool use_mrope_pos = false; // use M-RoPE position counting (the whole image is 1 temporal position)
    uint32_t n_tokens() const { return nx * ny; }
    clip_image_f32_batch batch_f32; // preprocessed image patches
    std::string id; // optional user-defined ID, useful for KV cache tracking

    mtmd_image_tokens clone() {
        return mtmd_image_tokens{
            nx,
            ny,
            use_mrope_pos,
            batch_f32.clone(),
            id
        };
    }
};

using mtmd_image_tokens_ptr = std::unique_ptr<mtmd_image_tokens>;

struct mtmd_audio_tokens {
    uint32_t n_tokens; // number of tokens
    clip_image_f32_batch batch_f32; // preprocessed image patches
    std::string id; // optional user-defined ID, useful for KV cache tracking

    mtmd_audio_tokens clone() {
        return mtmd_audio_tokens{
            n_tokens,
            batch_f32.clone(),
            id
        };
    }
};

using mtmd_audio_tokens_ptr = std::unique_ptr<mtmd_audio_tokens>;

struct mtmd_input_chunk {
    mtmd_input_chunk_type type;
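    // exactly one of the three payloads below is meaningful, selected by `type`:
    // tokens_text for TEXT chunks, tokens_image for IMAGE, tokens_audio for AUDIO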
    std::vector<llama_token> tokens_text;
    mtmd_image_tokens_ptr tokens_image;
    mtmd_audio_tokens_ptr tokens_audio;
};
struct mtmd_input_chunks {
    std::vector<mtmd_input_chunk> entries;
};

// slice template, used by some llava-uhd models to correctly place the special tokens around image embeddings
// models not having it (llava-1.6) will process embeddings without any special tokens in-between
enum mtmd_slice_tmpl {
    MTMD_SLICE_TMPL_NONE,
    MTMD_SLICE_TMPL_MINICPMV_2_5,
    MTMD_SLICE_TMPL_MINICPMV_2_6,
    MTMD_SLICE_TMPL_LLAMA4,
    MTMD_SLICE_TMPL_IDEFICS3,
};

const char * mtmd_default_marker() {
    return "<__media__>";
}

static clip_flash_attn_type mtmd_get_clip_flash_attn_type(enum llama_flash_attn_type flash_attn_type) {
    switch (flash_attn_type) {
        case LLAMA_FLASH_ATTN_TYPE_AUTO:     return CLIP_FLASH_ATTN_TYPE_AUTO;
        case LLAMA_FLASH_ATTN_TYPE_DISABLED: return CLIP_FLASH_ATTN_TYPE_DISABLED;
        case LLAMA_FLASH_ATTN_TYPE_ENABLED:  return CLIP_FLASH_ATTN_TYPE_ENABLED;
    }
    return CLIP_FLASH_ATTN_TYPE_AUTO;
}

mtmd_context_params mtmd_context_params_default() {
    mtmd_context_params params {
        /* use_gpu          */ true,
        /* print_timings    */ true,
        /* n_threads        */ 4,
        /* image_marker     */ MTMD_DEFAULT_IMAGE_MARKER,
        /* media_marker     */ mtmd_default_marker(),
        /* flash_attn_type  */ LLAMA_FLASH_ATTN_TYPE_AUTO,
        /* warmup           */ true,
        /* image_min_tokens */ -1,
        /* image_max_tokens */ -1,
    };
    return params;
}
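
// hypothetical usage sketch (caller code, not part of this file); override only
// the fields you need before passing the params to mtmd_init_from_file():
//   mtmd_context_params params = mtmd_context_params_default();
//   params.n_threads = 8;
//   mtmd_context * mctx = mtmd_init_from_file("mmproj.gguf", model, params);
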
struct mtmd_context {
    struct clip_ctx * ctx_v; // vision
    struct clip_ctx * ctx_a; // audio

    const struct llama_model * text_model;
    std::vector<float> image_embd_v; // image embedding vector

    bool print_timings;
    int n_threads;
    std::string media_marker;
    const int n_embd_text;

    // these are not tokens, but strings used to mark the beginning and end of image/audio embeddings
    std::string img_beg;
    std::string img_end;
    std::string aud_beg;
    std::string aud_end;

    // for llava-uhd style models, we need special tokens in-between slices
    // minicpmv calls them "slices", llama 4 calls them "tiles"
    mtmd_slice_tmpl slice_tmpl = MTMD_SLICE_TMPL_NONE;
    std::vector<llama_token> tok_ov_img_start;  // overview image
    std::vector<llama_token> tok_ov_img_end;    // overview image
    std::vector<llama_token> tok_slices_start;  // start of all slices
    std::vector<llama_token> tok_slices_end;    // end of all slices
    std::vector<llama_token> tok_sli_img_start; // single slice start
    std::vector<llama_token> tok_sli_img_end;   // single slice end
    std::vector<llama_token> tok_sli_img_mid;   // between 2 slices
    std::vector<llama_token> tok_row_end;       // end of row
    bool tok_row_end_trail = false;
    bool ov_img_first      = false;

    bool use_mrope = false; // for Qwen2VL, we need to use M-RoPE

    // string template for slice image delimiters with row/col (idefics3)
    std::string sli_img_start_tmpl;

    std::unique_ptr<mtmd_audio_preprocessor> audio_preproc;

    // TODO @ngxson : add timings

    mtmd_context(const char * mmproj_fname,
                 const llama_model * text_model,
                 const mtmd_context_params & ctx_params) :
        text_model   (text_model),
        print_timings(ctx_params.print_timings),
        n_threads    (ctx_params.n_threads),
        media_marker (ctx_params.media_marker),
        n_embd_text  (llama_model_n_embd_inp(text_model))
    {
        if (std::string(ctx_params.image_marker) != MTMD_DEFAULT_IMAGE_MARKER) {
            throw std::runtime_error("custom image_marker is not supported anymore, use media_marker instead");
        }
        if (media_marker.empty()) {
            throw std::runtime_error("media_marker must not be empty");
        }

        clip_context_params ctx_clip_params {
            /* use_gpu          */ ctx_params.use_gpu,
            /* flash_attn_type  */ mtmd_get_clip_flash_attn_type(ctx_params.flash_attn_type),
            /* image_min_tokens */ ctx_params.image_min_tokens,
            /* image_max_tokens */ ctx_params.image_max_tokens,
            /* warmup           */ ctx_params.warmup,
        };
        auto res = clip_init(mmproj_fname, ctx_clip_params);
        ctx_v = res.ctx_v;
        ctx_a = res.ctx_a;
        if (!ctx_v && !ctx_a) {
            throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname));
        }

        // if both vision and audio mmproj are present, we need to validate their n_embd
        if (ctx_v && ctx_a) {
            int n_embd_v = clip_n_mmproj_embd(ctx_v);
            int n_embd_a = clip_n_mmproj_embd(ctx_a);
            if (n_embd_v != n_embd_a) {
                throw std::runtime_error(string_format(
                    "mismatch between vision and audio mmproj (n_embd_v = %d, n_embd_a = %d)\n",
                    n_embd_v, n_embd_a));
            }
        }

        // since we already validated the n_embd of the vision and audio mmproj,
        // we can safely assume that they are the same
        int n_embd_clip = clip_n_mmproj_embd(ctx_v ? ctx_v : ctx_a);
        if (n_embd_text != n_embd_clip) {
            throw std::runtime_error(string_format(
                "mismatch between text model (n_embd = %d) and mmproj (n_embd = %d)\n"
                "hint: you may be using the wrong mmproj\n",
                n_embd_text, n_embd_clip));
        }

        if (ctx_v) {
            init_vision();
        }
        if (ctx_a) {
            init_audio();
        }
    }
    void init_vision() {
        GGML_ASSERT(ctx_v != nullptr);
        use_mrope = clip_is_mrope(ctx_v);

        projector_type proj = clip_get_projector_type(ctx_v);
        int minicpmv_version = clip_is_minicpmv(ctx_v);
        if (minicpmv_version == 2) {
            // minicpmv 2.5 format:
            // <image> (overview) </image><slice><image> (slice) </image><image> (slice) </image>\n ... </slice>
            slice_tmpl        = MTMD_SLICE_TMPL_MINICPMV_2_5;
            tok_ov_img_start  = {lookup_token("<image>")};
            tok_ov_img_end    = {lookup_token("</image>")};
            tok_slices_start  = {lookup_token("<slice>")};
            tok_slices_end    = {lookup_token("</slice>")};
            tok_sli_img_start = tok_ov_img_start;
            tok_sli_img_end   = tok_ov_img_end;
            tok_row_end       = {lookup_token("\n")};
            tok_row_end_trail = false; // no trailing end-of-row token
            ov_img_first      = true;
        } else if (minicpmv_version == 3 || minicpmv_version == 4 || minicpmv_version == 5 || minicpmv_version == 6) {
            // minicpmv 2.6 format:
            // <image> (overview) </image><slice> (slice) </slice><slice> (slice) </slice>\n ...
            slice_tmpl        = MTMD_SLICE_TMPL_MINICPMV_2_6;
            tok_ov_img_start  = {lookup_token("<image>")};
            tok_ov_img_end    = {lookup_token("</image>")};
            tok_sli_img_start = {lookup_token("<slice>")};
            tok_sli_img_end   = {lookup_token("</slice>")};
            tok_row_end       = {lookup_token("\n")};
            tok_row_end_trail = false; // no trailing end-of-row token
            ov_img_first      = true;
        } else if (minicpmv_version != 0) {
            GGML_ASSERT(false && "unsupported minicpmv version");
        } else if (proj == PROJECTOR_TYPE_LLAMA4) {
            // llama 4 format:
            // <|image_start|>
            //     (slice) <|tile_x_separator|> (slice) <|tile_x_separator|> ... <|tile_y_separator|>
            //     (slice) <|tile_x_separator|> (slice) <|tile_x_separator|> ... <|tile_y_separator|>
            //     ... <|tile_y_separator|>   <-- trailing end-of-row token
            //     <|image|> (overview)       <-- overview image is last
            // <|image_end|>
            slice_tmpl        = MTMD_SLICE_TMPL_LLAMA4;
            tok_ov_img_start  = {lookup_token("<|image|>")};
            tok_sli_img_mid   = {lookup_token("<|tile_x_separator|>")};
            tok_row_end       = {lookup_token("<|tile_y_separator|>")};
            tok_row_end_trail = true;  // add trailing end-of-row token
            ov_img_first      = false; // overview image is last
        }

        // set boi/eoi
        if (proj == PROJECTOR_TYPE_GEMMA3) {
            // <start_of_image> ... (image embeddings) ... <end_of_image>
            img_beg = "<start_of_image>";
            img_end = "<end_of_image>";
        } else if (proj == PROJECTOR_TYPE_IDEFICS3) {
            // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215
            slice_tmpl         = MTMD_SLICE_TMPL_IDEFICS3;
            tok_ov_img_start   = {lookup_token("\n\n"), lookup_token("<fake_token_around_image>"), lookup_token("<global-img>")};
            tok_ov_img_end     = {lookup_token("<fake_token_around_image>")};
            tok_row_end        = {lookup_token("\n")};
            sli_img_start_tmpl = "<fake_token_around_image><row_%d_col_%d>";
        } else if (proj == PROJECTOR_TYPE_PIXTRAL) {
            // https://github.com/huggingface/transformers/blob/1cd110c6cb6a6237614130c470e9a902dbc1a4bd/docs/source/en/model_doc/pixtral.md
            img_end = "[IMG_END]";
        } else if (proj == PROJECTOR_TYPE_QWEN2VL || proj == PROJECTOR_TYPE_QWEN25VL || proj == PROJECTOR_TYPE_QWEN3VL) {
            // <|vision_start|> ... (image embeddings) ... <|vision_end|>
            img_beg = "<|vision_start|>";
            img_end = "<|vision_end|>";
        } else if (proj == PROJECTOR_TYPE_LLAMA4) {
            // (more details in mtmd_context constructor)
            img_beg = "<|image_start|>";
            img_end = "<|image_end|>";
            LOG_WRN("%s: llama 4 vision is known to have degraded quality:\n"
                    "    https://github.com/ggml-org/llama.cpp/pull/13282\n", __func__);
        } else if (proj == PROJECTOR_TYPE_INTERNVL) {
            // <img> ... (image embeddings) ... </img>
            img_beg = "<img>";
            img_end = "</img>";
        } else if (proj == PROJECTOR_TYPE_LIGHTONOCR) {
            // <|im_start|> ... (image embeddings) ... <|im_end|>
            img_beg = "<|im_start|>";
            img_end = "<|im_end|>";
        } else if (proj == PROJECTOR_TYPE_LFM2) {
            img_beg = "<|image_start|>";
            img_end = "<|image_end|>";
        } else if (proj == PROJECTOR_TYPE_GLM4V) {
            img_beg = "<|begin_of_image|>";
            img_end = "<|end_of_image|>";
        }
    }
    void init_audio() {
        GGML_ASSERT(ctx_a != nullptr);
        projector_type proj = clip_get_projector_type(ctx_a);

        LOG_WRN("%s: audio input is in experimental stage and may have reduced quality:\n"
                "    https://github.com/ggml-org/llama.cpp/discussions/13759\n", __func__);

        // set preprocessor
        switch (proj) {
            case PROJECTOR_TYPE_QWEN2A:
            case PROJECTOR_TYPE_QWEN25O:
            case PROJECTOR_TYPE_ULTRAVOX:
            case PROJECTOR_TYPE_VOXTRAL:
            case PROJECTOR_TYPE_GLMA:
                audio_preproc = std::make_unique<mtmd_audio_preprocessor_whisper>(ctx_a);
                break;
            default:
                GGML_ABORT("unsupported audio projector type");
        }

        // initialize audio preprocessor
        audio_preproc->initialize();

        // set special tokens
        if (proj == PROJECTOR_TYPE_QWEN2A) {
            // <|audio_bos|> ... (embeddings) ... <|audio_eos|>
            aud_beg = "<|audio_bos|>";
            aud_end = "<|audio_eos|>";
        } else if (proj == PROJECTOR_TYPE_ULTRAVOX) {
            // [BEGIN_AUDIO] ... (embeddings) ...
            aud_beg = "[BEGIN_AUDIO]";
        }
    }
    // get clip ctx based on chunk type
    clip_ctx * get_clip_ctx(const mtmd_input_chunk * chunk) const {
        if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
            return ctx_v;
        } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
            return ctx_a;
        }
        GGML_ABORT("unknown chunk type");
    }

    projector_type proj_type_v() const {
        return ctx_v ? clip_get_projector_type(ctx_v) : PROJECTOR_TYPE_UNKNOWN;
    }

    projector_type proj_type_a() const {
        return ctx_a ? clip_get_projector_type(ctx_a) : PROJECTOR_TYPE_UNKNOWN;
    }

    ~mtmd_context() {
        clip_free(ctx_a);
        clip_free(ctx_v);
    }

private:
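    // brute-force reverse lookup: render each vocab id to text and compare;
    // O(n_vocab) per call, but only used for a handful of special tokens at init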
    llama_token lookup_token(const std::string & token_text) {
        const llama_vocab * vocab = llama_model_get_vocab(text_model);
        const int n_vocab = llama_vocab_n_tokens(vocab);
        for (int i = 0; i < n_vocab; i++) {
            if (token_to_piece(vocab, i, true) == token_text) {
                return i;
            }
        }
        return LLAMA_TOKEN_NULL;
    }
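
    // two-pass detokenization: a negative return value from llama_token_to_piece
    // means the buffer was too small, and its magnitude is the required size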
    std::string token_to_piece(const llama_vocab * vocab, llama_token token, bool special) {
        std::string piece;
        piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\0'
        const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
        if (n_chars < 0) {
            piece.resize(-n_chars);
            int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
            GGML_ASSERT(check == -n_chars);
        } else {
            piece.resize(n_chars);
        }
        return piece;
    }
};
mtmd_context * mtmd_init_from_file(const char * mmproj_fname,
                                   const struct llama_model * text_model,
                                   const struct mtmd_context_params ctx_params) {
    try {
        return new mtmd_context(mmproj_fname, text_model, ctx_params);
    } catch (const std::exception & e) {
        LOG_ERR("%s: error: %s\n", __func__, e.what());
        return nullptr;
    }
}

void mtmd_free(mtmd_context * ctx) {
    delete ctx;
}
struct mtmd_tokenizer {
    mtmd_context * ctx;
    std::vector<const mtmd_bitmap *> bitmaps;

    std::string input_text;
    bool add_special;
    bool parse_special;
    const llama_vocab * vocab;

    mtmd_input_chunks cur;

    mtmd_tokenizer(mtmd_context * ctx,
                   const mtmd_input_text * text,
                   const mtmd_bitmap ** bitmaps,
                   size_t n_bitmaps) : ctx(ctx), bitmaps(bitmaps, bitmaps + n_bitmaps) {
        add_special   = text->add_special;
        parse_special = text->parse_special;
        input_text    = text->text;
        vocab         = llama_model_get_vocab(ctx->text_model);

        // for compatibility, we convert image marker to media marker
        string_replace_all(input_text, MTMD_DEFAULT_IMAGE_MARKER, ctx->media_marker);
    }

    int32_t tokenize(mtmd_input_chunks * output) {
        cur.entries.clear();
        std::vector<std::string> parts = split_text(input_text, ctx->media_marker);
        size_t i_bm = 0; // index of the current bitmap

        for (auto & part : parts) {
            if (part == ctx->media_marker) {
                // this is a marker, we should add the next bitmap
                if (i_bm >= bitmaps.size()) {
                    LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n",
                            __func__, bitmaps.size(), parts.size() - 1);
                    return 1;
                }
                const mtmd_bitmap * bitmap = bitmaps[i_bm++];
                int32_t res = add_media(bitmap);
                if (res != 0) {
                    return res;
                }
            } else {
                // this is a text part, we should add it as text
                add_text(part, parse_special);
            }
        }

        if (add_special && llama_vocab_get_add_bos(vocab)) {
            // if first chunk is text, we add BOS token to first text chunk
            // otherwise, create a new text chunk with BOS token
            if (!cur.entries.empty() && cur.entries[0].type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
                // add BOS token to the beginning of first text chunk
                cur.entries[0].tokens_text.insert(cur.entries[0].tokens_text.begin(), llama_vocab_bos(vocab));
            } else {
                // create a new text chunk with BOS token at the beginning
                mtmd_input_chunk bos_chunk{
                    MTMD_INPUT_CHUNK_TYPE_TEXT,
                    {llama_vocab_bos(vocab)},
                    nullptr, // image tokens
                    nullptr, // audio tokens
                };
                cur.entries.insert(cur.entries.begin(), std::move(bos_chunk));
            }
        }

        if (add_special && llama_vocab_get_add_eos(vocab)) {
            // if last chunk is text, we add EOS token to it
            add_text({llama_vocab_eos(vocab)});
        }

        if (i_bm != bitmaps.size()) {
            LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n",
                    __func__, bitmaps.size(), parts.size() - 1);
            return 1;
        }

        *output = std::move(cur);

        return 0;
    }
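
    // e.g. tokenizing "describe <__media__> briefly" with one image bitmap yields
    // three chunks: [text "describe "] [image] [text " briefly"]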
    void add_text(const std::string & txt, bool parse_special) {
        LOG_DBG("%s: %s\n", __func__, txt.c_str());
        auto tokens = mtmd_tokenize_text_internal(vocab, txt, /* add_special */ false, parse_special);
        add_text(tokens);
    }

    void add_text(const std::vector<llama_token> & tokens) {
        if (tokens.empty()) {
            return;
        }
        // if last entry is also a text chunk, add tokens to it instead of creating new chunk
        if (!cur.entries.empty() && cur.entries.back().type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
            cur.entries.back().tokens_text.insert(
                cur.entries.back().tokens_text.end(),
                tokens.begin(),
                tokens.end());
        } else {
            mtmd_input_chunk chunk{
                MTMD_INPUT_CHUNK_TYPE_TEXT,
                tokens,
                nullptr, // image tokens
                nullptr, // audio tokens
            };
            cur.entries.emplace_back(std::move(chunk));
        }
    }
    int32_t add_media(const mtmd_bitmap * bitmap) {
        if (!bitmap->is_audio) {
            // handle image

            if (!ctx->ctx_v) {
                LOG_ERR("%s: error: model does not support vision input\n", __func__);
                return 2;
            }

            if (!ctx->img_beg.empty()) {
                add_text(ctx->img_beg, true); // add image begin token
            }

            // convert mtmd_bitmap to clip_image_u8
            clip_image_u8_ptr img_u8(clip_image_u8_init());
            img_u8->nx = bitmap->nx;
            img_u8->ny = bitmap->ny;
            img_u8->buf.resize(bitmap->data.size());
            std::memcpy(img_u8->buf.data(), bitmap->data.data(), img_u8->nx * img_u8->ny * 3);

            // preprocess image
            clip_image_f32_batch batch_f32;
            bool ok = clip_image_preprocess(ctx->ctx_v, img_u8.get(), &batch_f32);
            if (!ok) {
                LOG_ERR("Unable to preprocess image\n");
                return 2;
            }

            // handle llava-uhd style preprocessing
            if (
                   ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_5
                || ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_6
                || ctx->slice_tmpl == MTMD_SLICE_TMPL_LLAMA4
                || ctx->slice_tmpl == MTMD_SLICE_TMPL_IDEFICS3
            ) {
                const int n_col = batch_f32.grid_x;
                const int n_row = batch_f32.grid_y;

                // split batch into chunks of single images
                // NOTE: batch_f32 will be invalidated after this call
                auto chunks = split_batch_to_chunk(std::move(batch_f32), bitmap->id);
                GGML_ASSERT(chunks.size() > 0);

                auto ov_chunk = std::move(chunks.front());
                chunks.erase(chunks.begin());

                // add overview image (first)
                if (ctx->ov_img_first) {
                    add_text(ctx->tok_ov_img_start);
                    cur.entries.emplace_back(std::move(ov_chunk));
                    add_text(ctx->tok_ov_img_end);
                }

                // add slices (or tiles)
                if (!chunks.empty()) {
                    GGML_ASSERT((int)chunks.size() == n_row * n_col);
                    add_text(ctx->tok_slices_start);
                    for (int y = 0; y < n_row; y++) {
                        for (int x = 0; x < n_col; x++) {
                            const bool is_last_in_row = (x == n_col - 1);
                            if (!ctx->tok_sli_img_start.empty()) {
                                add_text(ctx->tok_sli_img_start);
                            } else if (!ctx->sli_img_start_tmpl.empty()) {
                                // if using a template to precede a slice image
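                                // std::snprintf(nullptr, 0, ...) returns the formatted
                                // length without writing; +1 makes room for the '\0'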
                                const size_t sz = std::snprintf(nullptr, 0, ctx->sli_img_start_tmpl.c_str(), y+1, x+1) + 1;
                                std::unique_ptr<char[]> buf(new char[sz]);
                                std::snprintf(buf.get(), sz, ctx->sli_img_start_tmpl.c_str(), y+1, x+1);
                                add_text(std::string(buf.get(), buf.get() + sz - 1), true);
                            }
                            cur.entries.emplace_back(std::move(chunks[y * n_col + x]));
                            add_text(ctx->tok_sli_img_end);
                            if (!is_last_in_row) {
                                add_text(ctx->tok_sli_img_mid);
                            }
                        }
                        if ((y != n_row - 1 || ctx->tok_row_end_trail)) {
                            add_text(ctx->tok_row_end);
                        }
                    }
                    add_text(ctx->tok_slices_end);
                }

                // add overview image (last)
                if (!ctx->ov_img_first) {
                    add_text(ctx->tok_ov_img_start);
                    cur.entries.emplace_back(std::move(ov_chunk));
                    add_text(ctx->tok_ov_img_end);
                }
            } else {
                size_t n_tokens = 0;
                for (const auto & entry : batch_f32.entries) {
                    n_tokens += clip_n_output_tokens(ctx->ctx_v, entry.get());
                }

                mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
                if (ctx->use_mrope) {
                    // for Qwen2VL, we need this information for M-RoPE decoding positions
                    image_tokens->nx = clip_n_output_tokens_x(ctx->ctx_v, batch_f32.entries[0].get());
                    image_tokens->ny = clip_n_output_tokens_y(ctx->ctx_v, batch_f32.entries[0].get());
                    image_tokens->use_mrope_pos = true;
                } else {
                    // other models, we only need the total number of tokens
                    image_tokens->nx = n_tokens;
                    image_tokens->ny = 1;
                }
                image_tokens->batch_f32 = std::move(batch_f32);
                image_tokens->id        = bitmap->id; // optional

                LOG_DBG("image_tokens->nx = %d\n", image_tokens->nx);
                LOG_DBG("image_tokens->ny = %d\n", image_tokens->ny);
                LOG_DBG("batch_f32 size = %d\n", (int)image_tokens->batch_f32.entries.size());

                mtmd_input_chunk chunk{
                    MTMD_INPUT_CHUNK_TYPE_IMAGE,
                    {}, // text tokens
                    std::move(image_tokens),
                    nullptr, // audio tokens
                };
                cur.entries.emplace_back(std::move(chunk));
            }

            if (!ctx->img_end.empty()) {
                add_text(ctx->img_end, true); // add image end token
            }

        } else {
            // handle audio

            if (!ctx->ctx_a) {
                LOG_ERR("%s: error: model does not support audio input\n", __func__);
                return 2;
            }
            if (bitmap->data.size() == 0) {
                LOG_ERR("%s: error: empty audio data\n", __func__);
                return 2;
            }

            if (!ctx->aud_beg.empty()) {
                add_text(ctx->aud_beg, true); // add audio begin token
            }

            // preprocess audio
            std::vector<mtmd_audio_mel> mel_spec_chunks;
            const float * samples = (const float *)bitmap->data.data();
            size_t n_samples = bitmap->data.size() / sizeof(float);
            bool ok = ctx->audio_preproc->preprocess(samples, n_samples, mel_spec_chunks);
            if (!ok) {
                LOG_ERR("Unable to preprocess audio\n");
                return 2;
            }

            // consider each mel_spec as a separate audio chunk
            // TODO: maybe support batching, but this may come with memory cost
            for (auto & mel_spec : mel_spec_chunks) {
                clip_image_f32_ptr mel_f32(clip_image_f32_init());
                mel_f32->nx  = mel_spec.n_len;
                mel_f32->ny  = mel_spec.n_mel;
                mel_f32->buf = std::move(mel_spec.data);
                size_t n_tokens = clip_n_output_tokens(ctx->ctx_a, mel_f32.get());

                clip_image_f32_batch batch_f32;
                batch_f32.is_audio = true;
                batch_f32.entries.push_back(std::move(mel_f32));

                mtmd_audio_tokens_ptr audio_tokens(new mtmd_audio_tokens);
                audio_tokens->n_tokens  = n_tokens;
                audio_tokens->batch_f32 = std::move(batch_f32);
                audio_tokens->id        = bitmap->id; // optional

                LOG_DBG("audio_tokens->n_tokens = %d\n", audio_tokens->n_tokens);

                mtmd_input_chunk chunk{
                    MTMD_INPUT_CHUNK_TYPE_AUDIO,
                    {}, // text tokens
                    nullptr, // image tokens
                    std::move(audio_tokens),
                };
                cur.entries.emplace_back(std::move(chunk));
            }

            if (!ctx->aud_end.empty()) {
                add_text(ctx->aud_end, true); // add audio end token
            }
        }

        return 0;
    }
    std::vector<mtmd_input_chunk> split_batch_to_chunk(clip_image_f32_batch && batch_f32, const std::string & id) {
        std::vector<mtmd_input_chunk> chunks;

        for (auto & entry : batch_f32.entries) {
            mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
            image_tokens->nx = clip_n_output_tokens(ctx->ctx_v, entry.get());
            image_tokens->ny = 1;
            image_tokens->batch_f32.entries.push_back(std::move(entry));
            image_tokens->id = id;

            mtmd_input_chunk chunk{
                MTMD_INPUT_CHUNK_TYPE_IMAGE,
                {}, // text tokens
                std::move(image_tokens),
                nullptr, // audio tokens
            };
            chunks.emplace_back(std::move(chunk));
        }

        return chunks;
    }

    // for example: "a <__media__> b <__media__> c" --> "a", "<__media__>", "b", "<__media__>", "c"
    static std::vector<std::string> split_text(const std::string & input, const std::string & delimiter) {
        std::vector<std::string> result;
        if (input.empty()) {
            return result;
        }
        size_t start = 0;
        size_t pos = 0;
        while ((pos = input.find(delimiter, start)) != std::string::npos) {
            if (pos > start) {
                result.push_back(input.substr(start, pos - start));
            }
            result.push_back(delimiter);
            start = pos + delimiter.length();
        }
        if (start < input.length()) {
            result.push_back(input.substr(start));
        }
        return result;
    }
    // copied from common_tokenize
    static std::vector<llama_token> mtmd_tokenize_text_internal(
            const struct llama_vocab * vocab,
            const std::string & text,
            bool add_special,
            bool parse_special) {
        // upper limit for the number of tokens
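        // (tokenization yields at most one token per input byte; +2 reserves room for BOS/EOS)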
        int n_tokens = text.length() + 2 * add_special;
        std::vector<llama_token> result(n_tokens);
        n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
        if (n_tokens < 0) {
            result.resize(-n_tokens);
            int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
            GGML_ASSERT(check == -n_tokens);
        } else {
            result.resize(n_tokens);
        }
        return result;
    }
};

int32_t mtmd_tokenize(mtmd_context * ctx,
                      mtmd_input_chunks * output,
                      const mtmd_input_text * text,
                      const mtmd_bitmap ** bitmaps,
                      size_t n_bitmaps) {
    mtmd_tokenizer tokenizer(ctx, text, bitmaps, n_bitmaps);
    return tokenizer.tokenize(output);
}
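
// typical call sequence (hypothetical caller code, for illustration only):
//   mtmd_input_chunks * chunks = mtmd_input_chunks_init();
//   mtmd_input_text txt;
//   txt.text          = "describe this: <__media__>";
//   txt.add_special   = true;
//   txt.parse_special = true;
//   mtmd_tokenize(ctx, chunks, &txt, &bitmap, 1);
//   // ... then encode each non-text chunk with mtmd_encode_chunk() below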
int32_t mtmd_encode_chunk(mtmd_context * ctx, const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        LOG_WRN("mtmd_encode_chunk has no effect for text chunks\n");
        return 0;
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        if (!ctx->ctx_v) {
            LOG_ERR("%s: model does not support vision input\n", __func__);
            return 1;
        }
        return mtmd_encode(ctx, chunk->tokens_image.get());
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        if (!ctx->ctx_a) {
            LOG_ERR("%s: model does not support audio input\n", __func__);
            return 1;
        }
        int n_mmproj_embd = ctx->n_embd_text;
        ctx->image_embd_v.resize(chunk->tokens_audio->n_tokens * n_mmproj_embd);
        bool ok = clip_image_batch_encode(
            ctx->ctx_a,
            ctx->n_threads,
            &chunk->tokens_audio->batch_f32,
            ctx->image_embd_v.data());
        return ok ? 0 : 1;
    }

    LOG_ERR("%s: unknown chunk type %d\n", __func__, (int)chunk->type);
    return 1;
}
int32_t mtmd_encode(mtmd_context * ctx, const mtmd_image_tokens * image_tokens) {
    clip_ctx * ctx_clip = ctx->ctx_v;
    if (!ctx_clip) {
        LOG_ERR("%s: this API does not support non-vision input, please use mtmd_encode_chunk instead\n", __func__);
        return 1;
    }
    int n_mmproj_embd = clip_n_mmproj_embd(ctx_clip);
    ctx->image_embd_v.resize(image_tokens->n_tokens() * n_mmproj_embd);
    bool ok = false;

    if (clip_is_llava(ctx_clip) || clip_is_minicpmv(ctx_clip) || clip_is_glm(ctx_clip)) {
        // TODO @ngxson : llava does not support batched encoding ; this should be fixed inside clip_image_batch_encode()
        const auto & entries = image_tokens->batch_f32.entries;
        for (size_t i = 0; i < entries.size(); i++) {
            int n_tokens_per_image = clip_n_output_tokens(ctx_clip, entries[i].get());
            ok = clip_image_encode(
                ctx_clip,
                ctx->n_threads,
                entries[i].get(),
                ctx->image_embd_v.data() + i*n_mmproj_embd*n_tokens_per_image);
        }
    } else {
        ok = clip_image_batch_encode(
            ctx_clip,
            ctx->n_threads,
            &image_tokens->batch_f32,
            ctx->image_embd_v.data());
    }

    return ok ? 0 : 1;
}
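
// NOTE: the buffer returned below is owned by the context and is overwritten
// by the next call to mtmd_encode() / mtmd_encode_chunk()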
float * mtmd_get_output_embd(mtmd_context * ctx) {
    return ctx->image_embd_v.data();
}

bool mtmd_decode_use_non_causal(mtmd_context * ctx) {
    if (ctx->ctx_v && clip_get_projector_type(ctx->ctx_v) == PROJECTOR_TYPE_GEMMA3) {
        return true;
    }
    return false;
}

bool mtmd_decode_use_mrope(mtmd_context * ctx) {
    return ctx->use_mrope;
}

bool mtmd_support_vision(mtmd_context * ctx) {
    return ctx->ctx_v != nullptr;
}

bool mtmd_support_audio(mtmd_context * ctx) {
    return ctx->ctx_a != nullptr;
}

int mtmd_get_audio_bitrate(mtmd_context * ctx) {
    if (!ctx->ctx_a) {
        return -1;
    }
    return clip_get_hparams(ctx->ctx_a)->audio_sample_rate;
}
//
// public API functions
//

// mtmd_bitmap

mtmd_bitmap * mtmd_bitmap_init(uint32_t nx,
                               uint32_t ny,
                               const unsigned char * data) {
    mtmd_bitmap * bitmap = new mtmd_bitmap;
    bitmap->nx = nx;
    bitmap->ny = ny;
    size_t data_size = (size_t)nx * ny * 3;
    bitmap->data.resize(data_size);
    std::memcpy(bitmap->data.data(), data, data_size);
    return bitmap;
}

mtmd_bitmap * mtmd_bitmap_init_from_audio(size_t n_samples,
                                          const float * data) {
    mtmd_bitmap * bitmap = new mtmd_bitmap;
    bitmap->nx = n_samples;
    bitmap->ny = 1;
    bitmap->is_audio = true;
    size_t data_size = n_samples * sizeof(float);
    bitmap->data.resize(data_size);
    std::memcpy(bitmap->data.data(), data, data_size);
    return bitmap;
}

uint32_t mtmd_bitmap_get_nx(const mtmd_bitmap * bitmap) {
    return bitmap->nx;
}

uint32_t mtmd_bitmap_get_ny(const mtmd_bitmap * bitmap) {
    return bitmap->ny;
}

const unsigned char * mtmd_bitmap_get_data(const mtmd_bitmap * bitmap) {
    return bitmap->data.data();
}

size_t mtmd_bitmap_get_n_bytes(const mtmd_bitmap * bitmap) {
    return bitmap->data.size();
}

bool mtmd_bitmap_is_audio(const mtmd_bitmap * bitmap) {
    return bitmap->is_audio;
}

const char * mtmd_bitmap_get_id(const mtmd_bitmap * bitmap) {
    return bitmap->id.c_str();
}

void mtmd_bitmap_set_id(mtmd_bitmap * bitmap, const char * id) {
    if (id) {
        bitmap->id = std::string(id);
    } else {
        bitmap->id.clear();
    }
}

void mtmd_bitmap_free(mtmd_bitmap * bitmap) {
    if (bitmap) {
        delete bitmap;
    }
}
// mtmd_input_chunks

mtmd_input_chunks * mtmd_input_chunks_init() {
    return new mtmd_input_chunks;
}

size_t mtmd_input_chunks_size(const mtmd_input_chunks * chunks) {
    return chunks->entries.size();
}

const mtmd_input_chunk * mtmd_input_chunks_get(const mtmd_input_chunks * chunks, size_t idx) {
    if (idx >= chunks->entries.size()) {
        return nullptr;
    }
    return &chunks->entries[idx];
}

void mtmd_input_chunks_free(mtmd_input_chunks * chunks) {
    if (chunks) {
        delete chunks;
    }
}
// mtmd_input_chunk

enum mtmd_input_chunk_type mtmd_input_chunk_get_type(const mtmd_input_chunk * chunk) {
    return chunk->type;
}

const llama_token * mtmd_input_chunk_get_tokens_text(const mtmd_input_chunk * chunk, size_t * n_tokens_output) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        *n_tokens_output = chunk->tokens_text.size();
        return chunk->tokens_text.data();
    }
    *n_tokens_output = 0;
    return nullptr;
}

const mtmd_image_tokens * mtmd_input_chunk_get_tokens_image(const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        return chunk->tokens_image.get();
    }
    return nullptr;
}

size_t mtmd_input_chunk_get_n_tokens(const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        return chunk->tokens_text.size();
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        return mtmd_image_tokens_get_n_tokens(chunk->tokens_image.get());
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        return chunk->tokens_audio->n_tokens;
    } else {
        GGML_ABORT("invalid chunk type");
    }
}

llama_pos mtmd_input_chunk_get_n_pos(const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        return chunk->tokens_text.size();
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        return mtmd_image_tokens_get_n_pos(chunk->tokens_image.get());
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        return chunk->tokens_audio->n_tokens;
    } else {
        GGML_ABORT("invalid chunk type");
    }
}

const char * mtmd_input_chunk_get_id(const mtmd_input_chunk * chunk) {
    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
        return chunk->tokens_image->id.c_str();
    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        return chunk->tokens_audio->id.c_str();
    }
    return nullptr;
}

mtmd_input_chunk * mtmd_input_chunk_copy(const mtmd_input_chunk * chunk) {
    mtmd_input_chunk * copy = new mtmd_input_chunk{
        chunk->type,
        chunk->tokens_text,
        nullptr,
        nullptr,
    };
    if (chunk->tokens_image) {
        // copy the image tokens
        copy->tokens_image = mtmd_image_tokens_ptr(new mtmd_image_tokens());
        *copy->tokens_image = chunk->tokens_image->clone();
    }
    if (chunk->tokens_audio) {
        // copy the audio tokens
        copy->tokens_audio = mtmd_audio_tokens_ptr(new mtmd_audio_tokens());
        *copy->tokens_audio = chunk->tokens_audio->clone();
    }
    return copy;
}

void mtmd_input_chunk_free(mtmd_input_chunk * chunk) {
    if (chunk) {
        delete chunk;
    }
}
// mtmd_image_tokens

size_t mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens) {
    return image_tokens->n_tokens();
}

size_t mtmd_image_tokens_get_nx(const mtmd_image_tokens * image_tokens) {
    return image_tokens->nx;
}

size_t mtmd_image_tokens_get_ny(const mtmd_image_tokens * image_tokens) {
    return image_tokens->ny;
}

const char * mtmd_image_tokens_get_id(const mtmd_image_tokens * image_tokens) {
    return image_tokens->id.c_str();
}

llama_pos mtmd_image_tokens_get_n_pos(const mtmd_image_tokens * image_tokens) {
    if (image_tokens->use_mrope_pos) {
        // for M-RoPE, temporal dimension = max(t,h,w)
        // t is omitted as we don't support video input
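        // e.g. an image with nx = 4, ny = 3 advances the position by max(4, 3) = 4,
        // even though it contains 12 tokens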
        return std::max(image_tokens->nx, image_tokens->ny);
    }
    return image_tokens->n_tokens();
}

// test function

mtmd_input_chunks * mtmd_test_create_input_chunks() {
    mtmd_input_chunks * chunks = mtmd_input_chunks_init();
    if (!chunks) {
        return nullptr;
    }

    // create a text chunk
    std::vector<llama_token> tokens_text = { 1, 2, 3, 4, 5 };
    mtmd_input_chunk chunk_text{
        MTMD_INPUT_CHUNK_TYPE_TEXT,
        std::move(tokens_text),
        nullptr, // image tokens
        nullptr, // audio tokens
    };
    chunks->entries.emplace_back(std::move(chunk_text));

    // create an image chunk
    mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
    image_tokens->nx = 4;
    image_tokens->ny = 4;
    image_tokens->batch_f32.entries.resize(16);
    image_tokens->id = "image_1";
    mtmd_input_chunk chunk_image{
        MTMD_INPUT_CHUNK_TYPE_IMAGE,
        {}, // text tokens
        std::move(image_tokens),
        nullptr, // audio tokens
    };
    chunks->entries.emplace_back(std::move(chunk_image));

    return chunks;
}

void mtmd_log_set(ggml_log_callback log_callback, void * user_data) {
    g_logger_state.log_callback = log_callback ? log_callback : clip_log_callback_default;
    g_logger_state.log_callback_user_data = user_data;
}