#include "clip.h"
#include "clip-impl.h"
#include "clip-model.h"
#include "clip-graph.h"
#include "models/models.h"

#include "ggml.h"
#include "ggml-cpp.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "gguf.h"

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <stdexcept>
#include <unordered_set>
#include <vector>
#include <cinttypes>
#include <limits>
#include <array>
#include <functional>

struct clip_logger_state g_logger_state = {clip_log_callback_default, NULL};
//#define CLIP_DEBUG_FUNCTIONS

#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8 & img, const std::string & filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    // Write pixel data
    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char *>(&img.buf[i]), 3);
    }

    file.close();
}
static void clip_image_save_to_bmp(const clip_image_u8 & img, const std::string & filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;
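    // each BMP row is padded to a multiple of 4 bytes,
    // e.g. nx = 5 -> widthInBytes = 15, paddingAmount = 1, stride = 16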
    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',     // Signature
        0,0,0,0,     // Image file size in bytes
        0,0,0,0,     // Reserved
        54,0,0,0     // Start of pixel array
    };

    // Total file size
    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0,    // Size of this header (40 bytes)
        0,0,0,0,     // Image width
        0,0,0,0,     // Image height
        1,0,         // Number of color planes
        24,0,        // Bits per pixel
        0,0,0,0,     // No compression
        0,0,0,0,     // Image size (can be 0 for no compression)
        0,0,0,0,     // X pixels per meter (not specified)
        0,0,0,0,     // Y pixels per meter (not specified)
        0,0,0,0,     // Total colors (color table not used)
        0,0,0,0      // Important colors (all are important)
    };

    // Width and height in the information header
    infoHeader[4]  = (unsigned char)(img.nx);
    infoHeader[5]  = (unsigned char)(img.nx >> 8);
    infoHeader[6]  = (unsigned char)(img.nx >> 16);
    infoHeader[7]  = (unsigned char)(img.nx >> 24);
    infoHeader[8]  = (unsigned char)(img.ny);
    infoHeader[9]  = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char *>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char *>(infoHeader), sizeof(infoHeader));

    // Pixel data
    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            // Each pixel
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
                img.buf[pixelIndex]
            };
            file.write(reinterpret_cast<char *>(pixel), 3);
        }
        // Write padding for the row
        file.write(reinterpret_cast<char *>(padding.data()), paddingAmount);
    }

    file.close();
}
// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32 & src, clip_image_u8 & dst) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(3 * src.nx * src.ny);
    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
    }
}
#endif
struct clip_ctx {
    clip_model model;

    gguf_context_ptr ctx_gguf;
    ggml_context_ptr ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    std::vector<ggml_backend_t> backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    ggml_backend_t backend     = nullptr;
    ggml_backend_t backend_cpu = nullptr;
    ggml_backend_buffer_ptr buf;

    int max_nodes = 8192;
    ggml_backend_sched_ptr sched;

    clip_flash_attn_type flash_attn_type = CLIP_FLASH_ATTN_TYPE_AUTO;

    bool is_allocated = false;

    // for debugging
    bool debug_graph = false;
    std::vector<ggml_tensor *> debug_print_tensors;
    clip_ctx(clip_context_params & ctx_params) {
        flash_attn_type = ctx_params.flash_attn_type;
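        // setting the MTMD_DEBUG_GRAPH env var (any value) makes cb() record and dump intermediate tensors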
        debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr;

        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (!backend_cpu) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        if (ctx_params.use_gpu) {
            auto backend_name = std::getenv("MTMD_BACKEND_DEVICE");
            if (backend_name != nullptr) {
                backend = ggml_backend_init_by_name(backend_name, nullptr);
                if (!backend) {
                    LOG_WRN("%s: Warning: Failed to initialize \"%s\" backend, falling back to default GPU backend\n", __func__, backend_name);
                }
            }
            if (!backend) {
                backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
                backend = backend ? backend : ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr);
            }
        }

        if (backend) {
            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
            backend_ptrs.push_back(backend);
            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
        } else {
            backend = backend_cpu;
            LOG_INF("%s: CLIP using CPU backend\n", __func__);
        }

        if (ctx_params.image_min_tokens > 0) {
            model.hparams.custom_image_min_tokens = ctx_params.image_min_tokens;
        }
        if (ctx_params.image_max_tokens > 0) {
            model.hparams.custom_image_max_tokens = ctx_params.image_max_tokens;
        }

        backend_ptrs.push_back(backend_cpu);
        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));
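        // backends are passed to the scheduler in priority order: the GPU backend (if any)
        // first and the CPU backend last, so unsupported ops can fall back to the CPU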
        sched.reset(
            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
        );
    }

    ~clip_ctx() {
        ggml_backend_free(backend);
        if (backend != backend_cpu) {
            ggml_backend_free(backend_cpu);
        }
    }

    // this function is added so that we don't change too much of the existing code
    projector_type proj_type() const {
        return model.proj_type;
    }
};
//
// clip_graph
//

clip_graph::clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
        model(ctx->model),
        hparams(model.hparams),
        proj_type(ctx->proj_type()),
        img(img),
        patch_size(hparams.patch_size),
        n_patches_x(img.nx / patch_size),
        n_patches_y(img.ny / patch_size),
        n_patches(n_patches_x * n_patches_y),
        n_embd(hparams.n_embd),
        n_head(hparams.n_head),
        d_head(n_embd / n_head),
        n_layer(hparams.n_layer),
        n_mmproj_embd(clip_n_mmproj_embd(ctx)),
        eps(hparams.eps),
        kq_scale(1.0f / sqrtf((float)d_head)),
        flash_attn_type(ctx->flash_attn_type),
        debug_graph(ctx->debug_graph),
        debug_print_tensors(ctx->debug_print_tensors) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ ctx->buf_compute_meta.size(),
        /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
        /*.no_alloc   =*/ true,
    };
    ctx0_ptr.reset(ggml_init(params));
    ctx0 = ctx0_ptr.get();
    gf = ggml_new_graph_custom(ctx0, ctx->max_nodes, false);
}
void clip_graph::cb(ggml_tensor * cur0, const char * name, int il) const {
    if (debug_graph) {
        ggml_tensor * cur = ggml_cpy(ctx0, cur0, ggml_dup_tensor(ctx0, cur0));
        std::string cur_name = il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
        ggml_set_name(cur, cur_name.c_str());
        ggml_set_output(cur);
        ggml_build_forward_expand(gf, cur);
        debug_print_tensors.push_back(cur);
    }
}
// siglip2 naflex
ggml_tensor * clip_graph::resize_position_embeddings(uint32_t interpolation_mode) {
    ggml_tensor * pos_embd = model.position_embeddings;
    GGML_ASSERT(pos_embd); // check before dereferencing it below

    const int height = img.ny / patch_size;
    const int width  = img.nx / patch_size;
    const uint32_t mode = interpolation_mode;
    const int n_per_side = (int)std::sqrt(pos_embd->ne[1]);

    if (height == n_per_side && width == n_per_side) {
        return pos_embd;
    }

    pos_embd = ggml_reshape_3d(ctx0, pos_embd, n_embd, n_per_side, n_per_side);  // -> (n_embd, n_per_side, n_per_side)
    pos_embd = ggml_permute(ctx0, pos_embd, 2, 0, 1, 3);                         // -> (n_per_side, n_per_side, n_embd)
    pos_embd = ggml_interpolate(ctx0, pos_embd, width, height, n_embd, 1, mode); // -> (width, height, n_embd)
    pos_embd = ggml_permute(ctx0, pos_embd, 1, 2, 0, 3);                         // -> (n_embd, width, height)
    pos_embd = ggml_cont_2d(ctx0, pos_embd, n_embd, width * height);             // -> (n_embd, width * height)
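    // e.g. a 729-row (27x27) table resized for a 448x336 image with patch_size 14
    // gives width = 32, height = 24 and a final shape of [n_embd, 768]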
    return pos_embd;
}
// build vision transformer (ViT) cgraph
// this function should cover most of the models
// if your model has specific features, you should probably duplicate this function
ggml_tensor * clip_graph::build_vit(
        ggml_tensor * inp,
        int64_t n_pos,
        norm_type norm_t,
        ffn_op_type ffn_t,
        ggml_tensor * learned_pos_embd,
        std::function<ggml_tensor *(ggml_tensor *, const clip_layer &)> add_pos
    ) {
    if (learned_pos_embd) {
        inp = ggml_add(ctx0, inp, learned_pos_embd);
        cb(inp, "pos_embed", -1);
    }

    ggml_tensor * inpL = inp;

    // pre-layernorm
    if (model.pre_ln_w) {
        inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
        cb(inpL, "pre_ln", -1);
    }

    // loop over layers
    for (int il = 0; il < n_layer; il++) {
        auto & layer = model.layers[il];
        ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

        // layernorm1
        cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
        cb(cur, "layer_inp_normed", il);

        // self-attention
        {
            ggml_tensor * Qcur = nullptr;
            ggml_tensor * Kcur = nullptr;
            ggml_tensor * Vcur = nullptr;

            if (layer.qkv_w != nullptr) {
                // fused qkv
                cur = ggml_mul_mat(ctx0, layer.qkv_w, cur);
                if (layer.qkv_b != nullptr) {
                    cur = ggml_add(ctx0, cur, layer.qkv_b);
                }
                Qcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ 0);
                Kcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ ggml_row_size(cur->type, n_embd));
                Vcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ ggml_row_size(cur->type, 2 * n_embd));
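                // each row of the fused qkv output is laid out as [Q | K | V], each part
                // n_embd elements wide; the three views above stride over that same buffer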
                // TODO: q/k norm requires row size == n_embd, while here it's d_head
                // we can add support in the future if needed
                GGML_ASSERT(layer.q_norm == nullptr && layer.k_norm == nullptr);
            } else {
                // separate q, k, v
                Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }
                Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }
                Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }
                if (layer.q_norm) {
                    Qcur = build_norm(Qcur, layer.q_norm, NULL, norm_t, eps, il);
                    cb(Qcur, "Qcur_norm", il);
                }
                if (layer.k_norm) {
                    Kcur = build_norm(Kcur, layer.k_norm, NULL, norm_t, eps, il);
                    cb(Kcur, "Kcur_norm", il);
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);
            }

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            if (add_pos) {
                Qcur = add_pos(Qcur, layer);
                Kcur = add_pos(Kcur, layer);
                cb(Qcur, "Qcur_pos", il);
                cb(Kcur, "Kcur_pos", il);
            }

            cur = build_attn(layer.o_w, layer.o_b,
                Qcur, Kcur, Vcur, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);
        }

        if (layer.ls_1_w) {
            cur = ggml_mul(ctx0, cur, layer.ls_1_w);
            cb(cur, "attn_out_scaled", il);
        }

        // re-add the layer input, i.e. the residual
        cur = ggml_add(ctx0, cur, inpL);

        inpL = cur; // inpL = residual, cur = hidden_states

        cb(cur, "ffn_inp", il);

        // layernorm2
        cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
        cb(cur, "ffn_inp_normed", il);

        // ffn
        cur = build_ffn(cur,
            layer.ff_up_w, layer.ff_up_b,
            layer.ff_gate_w, layer.ff_gate_b,
            layer.ff_down_w, layer.ff_down_b,
            ffn_t, il);
        cb(cur, "ffn_out", il);

        if (layer.ls_2_w) {
            cur = ggml_mul(ctx0, cur, layer.ls_2_w);
            cb(cur, "ffn_out_scaled", il);
        }

        // residual 2
        cur = ggml_add(ctx0, inpL, cur);
        cb(cur, "layer_out", il);

        inpL = cur;
    }

    if (model.audio_has_avgpool()) {
        ggml_tensor * cur = inpL;
        cur = ggml_transpose(ctx0, cur);
        cur = ggml_cont(ctx0, cur);
        cur = ggml_pool_1d(ctx0, cur, GGML_OP_POOL_AVG, 2, 2, 0);
        cur = ggml_transpose(ctx0, cur);
        cur = ggml_cont(ctx0, cur);
        inpL = cur;
    }

    // post-layernorm
    if (model.post_ln_w) {
        inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
    }
    return inpL;
}
// build the input after conv2d (inp_raw --> patches)
// returns tensor with shape [n_embd, n_patches]
ggml_tensor * clip_graph::build_inp() {
    ggml_tensor * inp_raw = build_inp_raw();
    ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
    inp = ggml_reshape_2d(ctx0, inp, n_patches, n_embd);
    inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
    if (model.patch_bias) {
        inp = ggml_add(ctx0, inp, model.patch_bias);
        cb(inp, "patch_bias", -1);
    }
    return inp;
}

ggml_tensor * clip_graph::build_inp_raw(int channels) {
    ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, channels);
    ggml_set_name(inp_raw, "inp_raw");
    ggml_set_input(inp_raw);
    return inp_raw;
}
ggml_tensor * clip_graph::build_norm(
        ggml_tensor * cur,
        ggml_tensor * mw,
        ggml_tensor * mb,
        norm_type type,
        float norm_eps,
        int il) const {
    cur = type == NORM_TYPE_RMS
        ? ggml_rms_norm(ctx0, cur, norm_eps)
        : ggml_norm(ctx0, cur, norm_eps);
    if (mw) {
        cur = ggml_mul(ctx0, cur, mw);
        cb(cur, "norm_w", il);
    }
    if (mb) {
        cur = ggml_add(ctx0, cur, mb);
        cb(cur, "norm_b", il);
    }
    return cur;
}
ggml_tensor * clip_graph::build_ffn(
        ggml_tensor * cur,
        ggml_tensor * up,
        ggml_tensor * up_b,
        ggml_tensor * gate,
        ggml_tensor * gate_b,
        ggml_tensor * down,
        ggml_tensor * down_b,
        ffn_op_type type_op,
        int il) const {
    ggml_tensor * tmp = up ? ggml_mul_mat(ctx0, up, cur) : cur;
    cb(tmp, "ffn_up", il);

    if (up_b) {
        tmp = ggml_add(ctx0, tmp, up_b);
        cb(tmp, "ffn_up_b", il);
    }

    if (gate) {
        cur = ggml_mul_mat(ctx0, gate, cur);
        cb(cur, "ffn_gate", il);
        if (gate_b) {
            cur = ggml_add(ctx0, cur, gate_b);
            cb(cur, "ffn_gate_b", il);
        }
    } else {
        cur = tmp;
    }

    // we only support parallel ffn for now
    switch (type_op) {
        case FFN_SILU:
            if (gate) {
                cur = ggml_swiglu_split(ctx0, cur, tmp);
                cb(cur, "ffn_swiglu", il);
            } else {
                cur = ggml_silu(ctx0, cur);
                cb(cur, "ffn_silu", il);
            } break;
        case FFN_GELU:
            if (gate) {
                cur = ggml_geglu_split(ctx0, cur, tmp);
                cb(cur, "ffn_geglu", il);
            } else {
                cur = ggml_gelu(ctx0, cur);
                cb(cur, "ffn_gelu", il);
            } break;
        case FFN_GELU_ERF:
            if (gate) {
                cur = ggml_geglu_erf_split(ctx0, cur, tmp);
                cb(cur, "ffn_geglu_erf", il);
            } else {
                cur = ggml_gelu_erf(ctx0, cur);
                cb(cur, "ffn_gelu_erf", il);
            } break;
        case FFN_GELU_QUICK:
            if (gate) {
                cur = ggml_geglu_quick_split(ctx0, cur, tmp);
                cb(cur, "ffn_geglu_quick", il);
            } else {
                cur = ggml_gelu_quick(ctx0, cur);
                cb(cur, "ffn_gelu_quick", il);
            } break;
    }

    if (down) {
        cur = ggml_mul_mat(ctx0, down, cur);
    }

    if (down_b) {
        cb(cur, "ffn_down", il);
        cur = ggml_add(ctx0, cur, down_b);
    }

    return cur;
}
ggml_tensor * clip_graph::build_attn(
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_mask,
        float kq_scale,
        int il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
    //cb(q, "q", il);
    ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
    //cb(k, "k", il);

    ggml_tensor * cur;

    if (flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) {
        ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3);

        k = ggml_cast(ctx0, k, GGML_TYPE_F16);
        v = ggml_cast(ctx0, v, GGML_TYPE_F16);

        cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, 0.0f, 0.0f);
        ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);

        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
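        // ^ merge the heads back together: [d_head, n_head, n_pos] -> [n_embd, n_pos]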
    } else {
        ggml_tensor * v = ggml_permute(ctx0, v_cur, 1, 2, 0, 3);
        v = ggml_cont(ctx0, v);
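        // shapes: q, k = [d_head, n_pos, n_head] and v = [n_pos, d_head, n_head],
        // so kq = [n_pos_k, n_pos_q, n_head] and kqv = [d_head, n_pos_q, n_head]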
        const auto n_tokens = q->ne[1];
        const auto n_head   = q->ne[2];

        ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
        // F32 may not be needed for vision encoders?
        // ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
        kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, 0.0f);

        ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
        cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
        cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
    }

    cb(cur, "kqv_out", il);

    if (wo) {
        cur = ggml_mul_mat(ctx0, wo, cur);
    }
    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
// implementation of the 2D RoPE without adding a new op in ggml
// this is not efficient (it uses double the memory), but it works on all backends
// TODO: there was a more efficient implementation which relied on ggml_view and ggml_rope_ext_inplace,
//       but the inplace rope does not work well with non-contiguous tensors; we should fix that
//       and revert to the original implementation in https://github.com/ggml-org/llama.cpp/pull/13065
ggml_tensor * clip_graph::build_rope_2d(
        ggml_context * ctx0,
        ggml_tensor * cur,
        ggml_tensor * pos_a, // first half
        ggml_tensor * pos_b, // second half
        const float freq_base,
        const bool interleave_freq
    ) {
    const int64_t n_dim  = cur->ne[0];
    const int64_t n_head = cur->ne[1];
    const int64_t n_pos  = cur->ne[2];

    // for example, if we have cur tensor of shape (n_dim=8, n_head, n_pos)
    // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
    // first half of cur will use 1e-0, 1e-2 (even)
    // second half of cur will use 1e-1, 1e-3 (odd)
    // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
    //   ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
    // then for the second half, we use freq_scale to shift the inv_freq
    //   ^ why? replace (2i) with (2i+1) in the above equation
    const float freq_scale_odd = interleave_freq
        ? std::pow(freq_base, (float)-2/n_dim)
        : 1.0;
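    // e.g. n_dim = 8, freq_base = 10000: the half-rotation uses 10000^(-2i/8) for i = 0..3,
    // and freq_scale_odd = 10000^(-2/8) = 0.1 shifts each of those to the next (odd) frequency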
    // first half
    ggml_tensor * first;
    {
        first = ggml_view_3d(ctx0, cur,
            n_dim/2, n_head, n_pos,
            ggml_row_size(cur->type, n_dim),
            ggml_row_size(cur->type, n_dim*n_head),
            0);
        first = ggml_rope_ext(
            ctx0,
            first,
            pos_a,   // positions
            nullptr, // freq factors
            n_dim/2, // n_dims
            0, 0, freq_base,
            1.0f, 0.0f, 1.0f, 0.0f, 0.0f
        );
    }

    // second half
    ggml_tensor * second;
    {
        second = ggml_view_3d(ctx0, cur,
            n_dim/2, n_head, n_pos,
            ggml_row_size(cur->type, n_dim),
            ggml_row_size(cur->type, n_dim*n_head),
            n_dim/2 * ggml_element_size(cur));
        second = ggml_rope_ext(
            ctx0,
            second,
            pos_b,   // positions
            nullptr, // freq factors
            n_dim/2, // n_dims
            0, 0, freq_base,
            freq_scale_odd,
            0.0f, 1.0f, 0.0f, 0.0f
        );
    }

    cur = ggml_concat(ctx0, first, second, 0);
    return cur;
}
// Generic function to stack frames for audio processing
// Abstracts out the StackAudioFrames logic used by ultravox
ggml_tensor * clip_graph::build_stack(ggml_tensor * cur, int32_t stack_factor, int32_t n_embed) {
    if (stack_factor <= 1) {
        return cur;
    }
    int64_t total_elements = ggml_nelements(cur);
    int64_t stride = n_embed * stack_factor;

    // Calculate padded length
    int64_t padded_len = GGML_PAD(total_elements, stride);
    int64_t pad = padded_len - total_elements;
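    // e.g. stack_factor = 8, n_embed = 4: stride = 32, so 70 input elements
    // are padded to 96 and reshaped to [32, 3]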
    if (pad > 0) {
        // Pad the tensor to make it divisible by stride
        cur = ggml_view_1d(ctx0, cur, total_elements, 0);
        cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
    }

    // Reshape to [stride, padded_len / stride]
    cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
        ggml_row_size(cur->type, stride), 0);
    return cur;
}
// aka pixel_shuffle / pixel_unshuffle / patch_merger (Kimi-VL)
// supports dynamic resolution
ggml_tensor * clip_graph::build_patch_merge_permute(ggml_tensor * cur, int scale_factor) {
    GGML_ASSERT(scale_factor > 1);

    const int n_embd = cur->ne[0];
    int width  = img.nx / patch_size;
    int height = img.ny / patch_size;

    // pad width and height to factor
    const int64_t pad_width  = CLIP_ALIGN(width,  scale_factor) - width;
    const int64_t pad_height = CLIP_ALIGN(height, scale_factor) - height;
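    // e.g. scale_factor = 2 on a 27x27 patch grid: padded to 28x28,
    // giving a final shape of [4 * n_embd, 14 * 14 = 196]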
    cur = ggml_reshape_3d(ctx0, cur, n_embd, width, height);
    if (pad_width || pad_height) {
        cur     = ggml_pad(ctx0, cur, 0, pad_width, pad_height, 0);
        width  += pad_width;
        height += pad_height;
    }

    // unshuffle h
    cur = ggml_reshape_3d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height);
    cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

    // unshuffle w
    cur = ggml_cont_3d(ctx0, cur, n_embd * scale_factor * scale_factor, height / scale_factor, width / scale_factor);
    cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

    cur = ggml_cont_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
    cb(cur, "pixel_shuffle", -1);
    return cur;
}
static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
    GGML_ASSERT(imgs.entries.size() == 1 && "n_batch > 1 is not supported");
    const clip_image_f32 & img = *imgs.entries[0];

    std::unique_ptr<clip_graph> builder;
    switch (ctx->proj_type()) {
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_JANUS_PRO:
            {
                builder = std::make_unique<clip_graph_siglip>(ctx, img);
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                builder = std::make_unique<clip_graph_pixtral>(ctx, img);
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            {
                builder = std::make_unique<clip_graph_qwen2vl>(ctx, img);
            } break;
        case PROJECTOR_TYPE_QWEN3VL:
            {
                builder = std::make_unique<clip_graph_qwen3vl>(ctx, img);
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                builder = std::make_unique<clip_graph_minicpmv>(ctx, img);
            } break;
        case PROJECTOR_TYPE_INTERNVL:
            {
                builder = std::make_unique<clip_graph_internvl>(ctx, img);
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                builder = std::make_unique<clip_graph_llama4>(ctx, img);
            } break;
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_QWEN2A:
        case PROJECTOR_TYPE_GLMA:
            {
                builder = std::make_unique<clip_graph_whisper_enc>(ctx, img);
            } break;
        case PROJECTOR_TYPE_KIMIVL:
            {
                builder = std::make_unique<clip_graph_kimivl>(ctx, img);
            } break;
        case PROJECTOR_TYPE_COGVLM:
            {
                builder = std::make_unique<clip_graph_cogvlm>(ctx, img);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                builder = std::make_unique<clip_graph_llava>(ctx, img);
            } break;
        case PROJECTOR_TYPE_LFM2A:
            {
                builder = std::make_unique<clip_graph_conformer>(ctx, img);
            } break;
        case PROJECTOR_TYPE_GLM4V:
            {
                builder = std::make_unique<clip_graph_glm4v>(ctx, img);
            } break;
        default:
            GGML_ABORT("missing cgraph builder");
    }

    return builder->build();
}
//
// clip_model_loader
//

struct clip_model_loader {
    ggml_context_ptr ctx_meta;
    gguf_context_ptr ctx_gguf;

    std::string fname;

    size_t model_size = 0; // in bytes

    bool has_vision = false;
    bool has_audio  = false;

    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_model
    clip_model_loader(const char * fname) : fname(fname) {
        struct ggml_context * meta = nullptr;

        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &meta,
        };

        ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
        if (!ctx_gguf.get()) {
            throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
        }

        ctx_meta.reset(meta);

        const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());

        // print gguf info
        {
            std::string name;
            get_string(KEY_NAME, name, false);
            std::string description;
            get_string(KEY_DESCRIPTION, description, false);
            LOG_INF("%s: model name:   %s\n", __func__, name.c_str());
            LOG_INF("%s: description:  %s\n", __func__, description.c_str());
            LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx_gguf.get()));
            LOG_INF("%s: alignment:    %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
            LOG_INF("%s: n_tensors:    %d\n", __func__, n_tensors);
            LOG_INF("%s: n_kv:         %d\n", __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
            LOG_INF("\n");
        }

        // modalities
        {
            get_bool(KEY_HAS_VISION_ENC, has_vision, false);
            get_bool(KEY_HAS_AUDIO_ENC,  has_audio,  false);
            if (has_vision) {
                LOG_INF("%s: has vision encoder\n", __func__);
            }
            if (has_audio) {
                LOG_INF("%s: has audio encoder\n", __func__);
            }
        }

        // tensors
        {
            for (int i = 0; i < n_tensors; ++i) {
                const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
                const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
                enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
                ggml_tensor * cur = ggml_get_tensor(meta, name);
                size_t tensor_size = ggml_nbytes(cur);
                model_size += tensor_size;
                // note: ne[] is int64_t, so PRId64 is the correct format specifier
                LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n",
                    __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }
    void load_hparams(clip_model & model, clip_modality modality) {
        auto & hparams = model.hparams;
        std::string log_ffn_op; // for logging

        // sanity check
        if (modality == CLIP_MODALITY_VISION) {
            GGML_ASSERT(has_vision);
        } else if (modality == CLIP_MODALITY_AUDIO) {
            GGML_ASSERT(has_audio);
        }
        model.modality = modality;

        // projector type
        std::string proj_type;
        {
            // default key
            get_string(KEY_PROJ_TYPE, proj_type, false);

            // for models with mixed modalities
            if (proj_type.empty()) {
                if (modality == CLIP_MODALITY_VISION) {
                    get_string(KEY_VISION_PROJ_TYPE, proj_type, false);
                } else if (modality == CLIP_MODALITY_AUDIO) {
                    get_string(KEY_AUDIO_PROJ_TYPE, proj_type, false);
                } else {
                    GGML_ABORT("unknown modality");
                }
            }

            model.proj_type = clip_projector_type_from_string(proj_type);
            if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) {
                throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
            }

            // correct arch for multimodal models (legacy method)
            if (model.proj_type == PROJECTOR_TYPE_QWEN25O) {
                model.proj_type = modality == CLIP_MODALITY_VISION
                    ? PROJECTOR_TYPE_QWEN25VL
                    : PROJECTOR_TYPE_QWEN2A;
            }
        }

        const bool is_vision = model.modality == CLIP_MODALITY_VISION;
        const bool is_audio  = model.modality == CLIP_MODALITY_AUDIO;
        // other hparams
        {
            const char * prefix = is_vision ? "vision" : "audio";
            get_u32(string_format(KEY_N_EMBD, prefix),   hparams.n_embd);
            get_u32(string_format(KEY_N_HEAD, prefix),   hparams.n_head);
            get_u32(string_format(KEY_N_FF, prefix),     hparams.n_ff);
            get_u32(string_format(KEY_N_BLOCK, prefix),  hparams.n_layer);
            get_u32(string_format(KEY_PROJ_DIM, prefix), hparams.projection_dim);
            get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps);

            if (is_vision) {
                get_u32(KEY_IMAGE_SIZE, hparams.image_size);
                get_u32(KEY_PATCH_SIZE, hparams.patch_size);
                get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
                get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy
                get_u32(KEY_MINICPMV_QUERY_NUM, hparams.minicpmv_query_num, false);

                if (hparams.minicpmv_query_num == 0) {
                    // fallback to hardcoded values for legacy models:
                    // versions 3-6 use 64 queries, all other versions use 96
                    hparams.minicpmv_query_num = (hparams.minicpmv_version >= 3 && hparams.minicpmv_version <= 6)
                        ? 64
                        : 96;
                }
            } else if (is_audio) {
                get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins);

                // some hparams are unused, but still need to be set to avoid issues
                hparams.image_size = 0;
                hparams.patch_size = 1;

            } else {
                GGML_ASSERT(false && "unknown modality");
            }

            // for pinpoints, we need to convert it into a list of resolution candidates
            {
                std::vector<int> pinpoints;
                get_arr_int(KEY_IMAGE_GRID_PINPOINTS, pinpoints, false);
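                // pinpoints are stored as a flat list: [w0, h0, w1, h1, ...]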
                if (!pinpoints.empty()) {
                    for (size_t i = 0; i < pinpoints.size(); i += 2) {
                        hparams.image_res_candidates.push_back({
                            pinpoints[i],
                            pinpoints[i+1],
                        });
                    }
                }
            }

            // default warmup value
            hparams.warmup_image_size = hparams.image_size;

            hparams.has_llava_projector = model.proj_type == PROJECTOR_TYPE_MLP
                                       || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                                       || model.proj_type == PROJECTOR_TYPE_LDP
                                       || model.proj_type == PROJECTOR_TYPE_LDPV2;
            {
                bool use_gelu = false;
                bool use_silu = false;
                get_bool(KEY_USE_GELU, use_gelu, false);
                get_bool(KEY_USE_SILU, use_silu, false);
                if (use_gelu && use_silu) {
                    throw std::runtime_error(string_format("%s: both use_gelu and use_silu are set to true\n", __func__));
                }
                if (use_gelu) {
                    hparams.ffn_op = FFN_GELU;
                    log_ffn_op = "gelu";
                } else if (use_silu) {
                    hparams.ffn_op = FFN_SILU;
                    log_ffn_op = "silu";
                } else {
                    hparams.ffn_op = FFN_GELU_QUICK;
                    log_ffn_op = "gelu_quick";
                }
            }

            {
                std::string mm_patch_merge_type;
                get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
                if (mm_patch_merge_type == "spatial_unpad") {
                    hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
                }
            }
            if (is_vision) {
                int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
                int idx_std  = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
                GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
                GGML_ASSERT(idx_std >= 0  && "image_std not found");
                const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
                const float * std_data  = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
                for (int i = 0; i < 3; ++i) {
                    hparams.image_mean[i] = mean_data[i];
                    hparams.image_std[i]  = std_data[i];
                }
            }

            // Load the vision feature layer indices if they are explicitly provided;
            // if multiple vision feature layers are present, the values will be concatenated
            // to form the final visual features.
            // NOTE: gguf conversions should standardize the values of the vision feature layer to
            // be non-negative, since we use -1 to mark values as unset here.
            std::vector<int> vision_feature_layer;
            get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
            // convert std::vector to std::unordered_set
            for (auto & layer : vision_feature_layer) {
                hparams.vision_feature_layer.insert(layer);
            }
            // model-specific params
            switch (model.proj_type) {
                case PROJECTOR_TYPE_MINICPMV:
                    {
                        if (hparams.minicpmv_version == 0) {
                            hparams.minicpmv_version = 2; // default to 2 if not set
                        }
                    } break;
                case PROJECTOR_TYPE_INTERNVL:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                    } break;
                case PROJECTOR_TYPE_IDEFICS3:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        get_u32(KEY_PREPROC_IMAGE_SIZE, hparams.image_longest_edge, false);
                    } break;
                case PROJECTOR_TYPE_LFM2:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        // ref: https://huggingface.co/LiquidAI/LFM2-VL-3B/blob/main/preprocessor_config.json
                        // the config above specifies the number of tokens after downsampling, while here
                        // it is the number before, so relax the lower bound to 64
                        hparams.set_limit_image_tokens(64, 1024);
                    } break;
                case PROJECTOR_TYPE_PIXTRAL:
                case PROJECTOR_TYPE_LIGHTONOCR:
                    {
                        // ref: https://huggingface.co/mistral-community/pixtral-12b/blob/main/preprocessor_config.json
                        // TODO: verify the image_min_tokens
                        hparams.n_merge = 1; // the original pixtral does not use patch merging
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.n_merge, false);
                        hparams.set_limit_image_tokens(8, 1024);
                        hparams.set_warmup_n_tokens(256); // avoid OOM on warmup
                    } break;
                case PROJECTOR_TYPE_KIMIVL:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        // TODO: check kimivl preprocessor for exact values
                        hparams.set_limit_image_tokens(8, 1024);
                        hparams.set_warmup_n_tokens(256); // avoid OOM on warmup
                    } break;
                case PROJECTOR_TYPE_GEMMA3:
                    {
                        // default value (used by all model sizes in gemma 3 family)
                        // number of patches for each **side** is reduced by a factor of 4
                        hparams.n_merge = 4;
                        // test model (tinygemma3) has a different value, we optionally read it
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                    } break;
                case PROJECTOR_TYPE_QWEN2VL:
                case PROJECTOR_TYPE_QWEN25VL:
                case PROJECTOR_TYPE_QWEN3VL:
                    {
                        hparams.n_merge = 2; // default value for Qwen 2 and 2.5
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.n_merge, false);
                        get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern, model.proj_type == PROJECTOR_TYPE_QWEN25VL); // only 2.5 requires it
                        // ref: https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/blob/main/preprocessor_config.json
                        hparams.set_limit_image_tokens(8, 4096);
                        hparams.set_warmup_n_tokens(46*46); // avoid OOM on warmup
                        const int warn_min_pixels = 1024 * hparams.n_merge * hparams.n_merge * hparams.patch_size * hparams.patch_size;
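                        // ^ e.g. with n_merge = 2 and patch_size = 14 this is 1024 * 4 * 196 = 802816 pixels (a ~896x896 image)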
                        if (hparams.image_min_pixels < warn_min_pixels) {
                            LOG_WRN("%s: Qwen-VL models require at minimum 1024 image tokens to function correctly on grounding tasks\n", __func__);
                            LOG_WRN("%s: if you encounter problems with accuracy, try adding --image-min-tokens 1024\n", __func__);
                            LOG_WRN("%s: more info: https://github.com/ggml-org/llama.cpp/issues/16842\n\n", __func__);
                        }
                    } break;
                case PROJECTOR_TYPE_GLM4V:
                    {
                        hparams.rope_theta = 10000.0f;
                        hparams.n_merge = 2; // default value for GLM4-V
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.n_merge, false);
                        hparams.set_limit_image_tokens(8, 4096);
                        hparams.set_warmup_n_tokens(46*46); // avoid OOM on warmup
                    } break;
                case PROJECTOR_TYPE_LLAMA4:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        set_llava_uhd_res_candidates(model, 3);
                    } break;
                case PROJECTOR_TYPE_ULTRAVOX:
                case PROJECTOR_TYPE_QWEN2A:
                case PROJECTOR_TYPE_GLMA:
                case PROJECTOR_TYPE_VOXTRAL:
                    {
                        bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX ||
                                             model.proj_type == PROJECTOR_TYPE_VOXTRAL ||
                                             model.proj_type == PROJECTOR_TYPE_GLMA;
                        get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
                        hparams.ffn_op = FFN_GELU_ERF;
                        log_ffn_op = "gelu_erf"; // temporary solution for logging

                        // audio preprocessing params
                        hparams.audio_chunk_len   = 30; // in seconds
                        hparams.audio_sample_rate = 16000;
                        hparams.audio_n_fft       = 400;
                        hparams.audio_window_len  = 400;
                        hparams.audio_hop_len     = 160;
                    } break;
                case PROJECTOR_TYPE_LFM2A:
                    {
                        // audio preprocessing params
                        hparams.audio_chunk_len   = 1; // in seconds
                        hparams.audio_sample_rate = 16000;
                        hparams.audio_n_fft       = 512;
                        hparams.audio_window_len  = 400;
                        hparams.audio_hop_len     = 160;
                    } break;
                default:
                    break;
            }

            // sanity check
            {
                if (hparams.image_max_pixels < hparams.image_min_pixels) {
                    throw std::runtime_error(string_format("%s: image_max_pixels (%d) is less than image_min_pixels (%d)\n", __func__, hparams.image_max_pixels, hparams.image_min_pixels));
                }
            }
            LOG_INF("%s: projector:        %s\n", __func__, proj_type.c_str());
            LOG_INF("%s: n_embd:           %d\n", __func__, hparams.n_embd);
            LOG_INF("%s: n_head:           %d\n", __func__, hparams.n_head);
            LOG_INF("%s: n_ff:             %d\n", __func__, hparams.n_ff);
            LOG_INF("%s: n_layer:          %d\n", __func__, hparams.n_layer);
            LOG_INF("%s: ffn_op:           %s\n", __func__, log_ffn_op.c_str());
            LOG_INF("%s: projection_dim:   %d\n", __func__, hparams.projection_dim);
            if (is_vision) {
                LOG_INF("\n--- vision hparams ---\n");
                LOG_INF("%s: image_size:       %d\n", __func__, hparams.image_size);
                LOG_INF("%s: patch_size:       %d\n", __func__, hparams.patch_size);
                LOG_INF("%s: has_llava_proj:   %d\n", __func__, hparams.has_llava_projector);
                LOG_INF("%s: minicpmv_version: %d\n", __func__, hparams.minicpmv_version);
                LOG_INF("%s: n_merge:          %d\n", __func__, hparams.n_merge);
                LOG_INF("%s: n_wa_pattern:     %d\n", __func__, hparams.n_wa_pattern);
                if (hparams.image_min_pixels > 0) {
                    LOG_INF("%s: image_min_pixels: %d%s\n", __func__, hparams.image_min_pixels, hparams.custom_image_min_tokens > 0 ? " (custom value)" : "");
                }
                if (hparams.image_max_pixels > 0) {
                    LOG_INF("%s: image_max_pixels: %d%s\n", __func__, hparams.image_max_pixels, hparams.custom_image_max_tokens > 0 ? " (custom value)" : "");
                }
            } else if (is_audio) {
                LOG_INF("\n--- audio hparams ---\n");
                LOG_INF("%s: n_mel_bins:        %d\n", __func__, hparams.n_mel_bins);
                LOG_INF("%s: proj_stack_factor: %d\n", __func__, hparams.proj_stack_factor);
                LOG_INF("%s: audio_chunk_len:   %d\n", __func__, hparams.audio_chunk_len);
                LOG_INF("%s: audio_sample_rate: %d\n", __func__, hparams.audio_sample_rate);
                LOG_INF("%s: audio_n_fft:       %d\n", __func__, hparams.audio_n_fft);
                LOG_INF("%s: audio_window_len:  %d\n", __func__, hparams.audio_window_len);
                LOG_INF("%s: audio_hop_len:     %d\n", __func__, hparams.audio_hop_len);
            }
            LOG_INF("\n");
            LOG_INF("%s: model size:       %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size:    %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
        }
    }
    void load_tensors(clip_ctx & ctx_clip) {
        auto & model = ctx_clip.model;
        auto & hparams = model.hparams;
        std::map<std::string, size_t> tensor_offset;
        std::vector<ggml_tensor *> tensors_to_load;

        // TODO @ngxson : support both audio and video in the future
        const char * prefix = model.modality == CLIP_MODALITY_AUDIO ? "a" : "v";

        // get offsets
        for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
            const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
            tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
        }

        // create data context
        struct ggml_init_params params = {
            /*.mem_size   =*/ static_cast<size_t>(gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
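        // note: with no_alloc = true, this context only stores tensor metadata
        // (the ggml_tensor structs themselves); the actual weight data is
        // allocated later in a single backend buffer and then read in from the file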
        ctx_clip.ctx_data.reset(ggml_init(params));
        if (!ctx_clip.ctx_data) {
            throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
        }

        // helper function
        auto get_tensor = [&](const std::string & name, bool required = true) {
            ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
            if (!cur && required) {
                throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
            }
            if (cur) {
                tensors_to_load.push_back(cur);
                // add tensors to context
                ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
                ggml_set_name(data_tensor, cur->name);
                cur = data_tensor;
            }
            return cur;
        };
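        // the helper above looks a tensor up in ctx_meta (shapes/types parsed from
        // the GGUF header) and duplicates it into ctx_clip.ctx_data, so everything
        // that must be loaded ends up in one context; rough usage (tensor names
        // here are purely illustrative):
        //   ggml_tensor * w = get_tensor("some.weight");        // required, throws if missing
        //   ggml_tensor * b = get_tensor("some.bias", false);   // optional, may be nullptr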
        model.class_embedding = get_tensor(TN_CLASS_EMBD, false);

        model.pre_ln_w  = get_tensor(string_format(TN_LN_PRE,  prefix, "weight"), false);
        model.pre_ln_b  = get_tensor(string_format(TN_LN_PRE,  prefix, "bias"),   false);
        model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false);
        model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"),   false);

        model.patch_bias         = get_tensor(TN_PATCH_BIAS,   false);
        model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD,   false);
        model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);

        model.norm_embd_w = get_tensor(string_format(TN_NORM_EMBD, "weight"), false);
        model.norm_embd_b = get_tensor(string_format(TN_NORM_EMBD, "bias"),   false);

        model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false);

        // layers
        model.layers.resize(hparams.n_layer);
        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = model.layers[il];
            layer.k_w    = get_tensor(string_format(TN_ATTN_K,      prefix, il, "weight"), false);
            layer.q_w    = get_tensor(string_format(TN_ATTN_Q,      prefix, il, "weight"), false);
            layer.v_w    = get_tensor(string_format(TN_ATTN_V,      prefix, il, "weight"), false);
            layer.o_w    = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "weight"));
            layer.qkv_w  = get_tensor(string_format(TN_ATTN_QKV,    prefix, il, "weight"), false);
            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, prefix, il, "weight"), false);
            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, prefix, il, "weight"), false);
            layer.ln_1_w = get_tensor(string_format(TN_LN_1,        prefix, il, "weight"), false);
            layer.ln_2_w = get_tensor(string_format(TN_LN_2,        prefix, il, "weight"), false);
            layer.ls_1_w = get_tensor(string_format(TN_LS_1,        prefix, il, "weight"), false); // no bias
            layer.ls_2_w = get_tensor(string_format(TN_LS_2,        prefix, il, "weight"), false); // no bias

            layer.k_b    = get_tensor(string_format(TN_ATTN_K,      prefix, il, "bias"), false);
            layer.q_b    = get_tensor(string_format(TN_ATTN_Q,      prefix, il, "bias"), false);
            layer.v_b    = get_tensor(string_format(TN_ATTN_V,      prefix, il, "bias"), false);
            layer.o_b    = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "bias"), false);
            layer.qkv_b  = get_tensor(string_format(TN_ATTN_QKV,    prefix, il, "bias"), false);
            layer.ln_1_b = get_tensor(string_format(TN_LN_1,        prefix, il, "bias"), false);
            layer.ln_2_b = get_tensor(string_format(TN_LN_2,        prefix, il, "bias"), false);

            // ffn
            layer.ff_up_w   = get_tensor(string_format(TN_FFN_UP,   prefix, il, "weight"));
            layer.ff_up_b   = get_tensor(string_format(TN_FFN_UP,   prefix, il, "bias"),   false);
            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, prefix, il, "weight"), false);
            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, prefix, il, "bias"),   false);
            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "weight"));
            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "bias"),   false);

            // qwen3vl deepstack layer
            layer.deepstack_norm_w = get_tensor(string_format(TN_DEEPSTACK_NORM, il, "weight"), false);
            layer.deepstack_norm_b = get_tensor(string_format(TN_DEEPSTACK_NORM, il, "bias"),   false);
            layer.deepstack_fc1_w  = get_tensor(string_format(TN_DEEPSTACK_FC1,  il, "weight"), false);
            layer.deepstack_fc1_b  = get_tensor(string_format(TN_DEEPSTACK_FC1,  il, "bias"),   false);
            layer.deepstack_fc2_w  = get_tensor(string_format(TN_DEEPSTACK_FC2,  il, "weight"), false);
            layer.deepstack_fc2_b  = get_tensor(string_format(TN_DEEPSTACK_FC2,  il, "bias"),   false);
            if (layer.has_deepstack()) {
                model.n_deepstack_layers++;
            }

            // some models were already exported with legacy (incorrect) naming, which is quite messy; fix it here
            // note: Qwen model converted from the old surgery script has n_ff = 0, so we cannot use n_ff to check!
            bool is_ffn_swapped = (
                    // only old models need this fix
                    model.proj_type == PROJECTOR_TYPE_MLP
                    || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                    || model.proj_type == PROJECTOR_TYPE_LDP
                    || model.proj_type == PROJECTOR_TYPE_LDPV2
                    || model.proj_type == PROJECTOR_TYPE_QWEN2VL
                    || model.proj_type == PROJECTOR_TYPE_QWEN25VL
                    || model.proj_type == PROJECTOR_TYPE_GLM_EDGE
                    || model.proj_type == PROJECTOR_TYPE_GEMMA3
                    || model.proj_type == PROJECTOR_TYPE_IDEFICS3
                    || model.proj_type == PROJECTOR_TYPE_MINICPMV
                ) && layer.ff_up_w && layer.ff_down_w && layer.ff_down_w->ne[0] == hparams.n_embd;
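            // why this check works: for a ggml matmul weight, ne[0] is the input
            // dimension; a genuine ff_down maps n_ff -> n_embd, so its ne[0] should
            // be n_ff. seeing ne[0] == n_embd on ff_down means the exported tensor
            // takes the embedding as input, i.e. up/down were swapped at conversion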
            if (is_ffn_swapped) {
                // swap up and down weights
                ggml_tensor * tmp = layer.ff_up_w;
                layer.ff_up_w   = layer.ff_down_w;
                layer.ff_down_w = tmp;
                // swap up and down biases
                tmp             = layer.ff_up_b;
                layer.ff_up_b   = layer.ff_down_b;
                layer.ff_down_b = tmp;
                if (il == 0) {
                    LOG_WRN("%s: ffn up/down are swapped\n", __func__);
                }
            }
        }

        switch (model.proj_type) {
            case PROJECTOR_TYPE_MLP:
            case PROJECTOR_TYPE_MLP_NORM:
                {
                    // LLaVA projection
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"),   false);
                    // Yi-type llava
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"),   false);
                    // missing in Yi-type llava
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"),   false);
                    // Yi-type llava
                    model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
                    model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"),   false);
                    model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
                    model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"),   false);
                    if (model.mm_3_w) {
                        // TODO: this is a hack to support Yi-type llava
                        model.proj_type = PROJECTOR_TYPE_MLP_NORM;
                    }
                    model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
                } break;
            case PROJECTOR_TYPE_LDP:
                {
                    // MobileVLM projection
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                    model.mm_model_block_1_block_0_0_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
                    model.mm_model_block_1_block_0_1_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
                    model.mm_model_block_1_block_0_1_b   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
                    model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
                    model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
                    model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
                    model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
                    model.mm_model_block_1_block_2_0_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
                    model.mm_model_block_1_block_2_1_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
                    model.mm_model_block_1_block_2_1_b   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
                    model.mm_model_block_2_block_0_0_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
                    model.mm_model_block_2_block_0_1_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
                    model.mm_model_block_2_block_0_1_b   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
                    model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
                    model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
                    model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
                    model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
                    model.mm_model_block_2_block_2_0_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
                    model.mm_model_block_2_block_2_1_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
                    model.mm_model_block_2_block_2_1_b   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
                } break;
            case PROJECTOR_TYPE_LDPV2:
                {
                    // MobileVLM_V2 projection
                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                    model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
                    model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
                    model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
                } break;
            case PROJECTOR_TYPE_MINICPMV:
                {
                    // model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
                    model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
                    model.mm_model_query       = get_tensor(TN_MINICPMV_QUERY);
                    model.mm_model_proj        = get_tensor(TN_MINICPMV_PROJ);
                    model.mm_model_kv_proj     = get_tensor(TN_MINICPMV_KV_PROJ);
                    model.mm_model_attn_q_w    = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
                    model.mm_model_attn_k_w    = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
                    model.mm_model_attn_v_w    = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
                    model.mm_model_attn_q_b    = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
                    model.mm_model_attn_k_b    = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
                    model.mm_model_attn_v_b    = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
                    model.mm_model_attn_o_w    = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
                    model.mm_model_attn_o_b    = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
                    model.mm_model_ln_q_w      = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
                    model.mm_model_ln_q_b      = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
                    model.mm_model_ln_kv_w     = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
                    model.mm_model_ln_kv_b     = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
                    model.mm_model_ln_post_w   = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
                    model.mm_model_ln_post_b   = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
                } break;
            case PROJECTOR_TYPE_GLM_EDGE:
                {
                    model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                    model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
                    model.mm_model_mlp_0_w        = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
                    model.mm_model_ln_q_w         = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
                    model.mm_model_ln_q_b         = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
                    model.mm_model_mlp_1_w        = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
                    model.mm_model_mlp_2_w        = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
                    model.mm_model_mlp_3_w        = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
                    model.mm_boi                  = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
                    model.mm_eoi                  = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2VL:
            case PROJECTOR_TYPE_QWEN25VL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_QWEN3VL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_GLM4V:
                {
                    model.projection        = get_tensor(TN_MM_PROJECTOR);
                    model.mm_ffn_up_w       = get_tensor(string_format(TN_MM_UP, "weight"));
                    model.mm_ffn_up_b       = get_tensor(string_format(TN_MM_UP, "bias"), false);
                    model.mm_ffn_gate_w     = get_tensor(string_format(TN_MM_GATE, "weight"));
                    model.mm_ffn_gate_b     = get_tensor(string_format(TN_MM_GATE, "bias"), false);
                    model.mm_ffn_down_w     = get_tensor(string_format(TN_MM_DOWN, "weight"));
                    model.mm_ffn_down_b     = get_tensor(string_format(TN_MM_DOWN, "bias"), false);
                    model.mm_post_norm_w    = get_tensor(string_format(TN_MM_POST_NORM, "weight"));
                    model.mm_post_norm_b    = get_tensor(string_format(TN_MM_POST_NORM, "bias"), false);
                    model.mm_patch_merger_w = get_tensor(string_format(TN_MM_PATCH_MERGER, "weight"));
                    model.mm_patch_merger_b = get_tensor(string_format(TN_MM_PATCH_MERGER, "bias"));
                } break;
            case PROJECTOR_TYPE_GEMMA3:
                {
                    model.mm_input_proj_w    = get_tensor(TN_MM_INP_PROJ);
                    model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
                } break;
            case PROJECTOR_TYPE_IDEFICS3:
                {
                    model.projection = get_tensor(TN_MM_PROJECTOR);
                } break;
            case PROJECTOR_TYPE_LFM2:
            case PROJECTOR_TYPE_KIMIVL:
                {
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM);
                    model.mm_input_norm_b = get_tensor(TN_MM_INP_NORM_B);
                    model.mm_1_w          = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b          = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
                    model.mm_2_w          = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b          = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_PIXTRAL:
                {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // [IMG_BREAK] token embedding
                    model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
                    // for mistral small 3.1
                    model.mm_input_norm_w   = get_tensor(TN_MM_INP_NORM, false);
                    model.mm_patch_merger_w = get_tensor(string_format(TN_MM_PATCH_MERGER, "weight"), false);
                } break;
            case PROJECTOR_TYPE_LIGHTONOCR:
                {
                    model.mm_1_w            = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b            = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w            = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b            = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    model.mm_input_norm_w   = get_tensor(TN_MM_INP_NORM, false);
                    model.mm_patch_merger_w = get_tensor(string_format(TN_MM_PATCH_MERGER, "weight"), false);
                } break;
            case PROJECTOR_TYPE_ULTRAVOX:
                {
                    model.conv1d_1_w    = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b    = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w    = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b    = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w        = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w        = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
                    model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2A:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_fc_w    = get_tensor(string_format(TN_MM_AUDIO_FC, "weight"));
                    model.mm_fc_b    = get_tensor(string_format(TN_MM_AUDIO_FC, "bias"));
                } break;
            case PROJECTOR_TYPE_VOXTRAL:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w     = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w     = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                } break;
            case PROJECTOR_TYPE_INTERNVL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                } break;
            case PROJECTOR_TYPE_GLMA:
                {
                    model.conv1d_1_w    = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b    = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w    = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b    = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w        = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_1_b        = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "bias"));
                    model.mm_2_w        = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                    model.mm_2_b        = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "bias"));
                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
                    model.mm_norm_pre_b = get_tensor(string_format(TN_MM_NORM_PRE, "bias"));
                    model.mm_boi        = get_tensor(string_format(TN_TOK_BOI, "weight"));
                    model.mm_eoi        = get_tensor(string_format(TN_TOK_EOI, "weight"));
                } break;
            case PROJECTOR_TYPE_LLAMA4:
                {
                    model.mm_model_proj    = get_tensor(TN_MM_PROJECTOR);
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                } break;
            case PROJECTOR_TYPE_COGVLM:
                {
                    model.mm_model_proj     = get_tensor(TN_MM_PROJECTOR);
                    model.mm_post_fc_norm_w = get_tensor(string_format(TN_MM_POST_FC_NORM, "weight"));
                    model.mm_post_fc_norm_b = get_tensor(string_format(TN_MM_POST_FC_NORM, "bias"));
                    model.mm_h_to_4h_w      = get_tensor(string_format(TN_MM_H_TO_4H, "weight"));
                    model.mm_gate_w         = get_tensor(string_format(TN_MM_GATE, "weight"));
                    model.mm_4h_to_h_w      = get_tensor(string_format(TN_MM_4H_TO_H, "weight"));
                    model.mm_boi            = get_tensor(TN_TOK_BOI);
                    model.mm_eoi            = get_tensor(TN_TOK_EOI);
                } break;
            case PROJECTOR_TYPE_JANUS_PRO:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
                } break;
            case PROJECTOR_TYPE_LFM2A:
                {
                    for (int i : {0, 2, 3, 5, 6}) {
                        model.pre_encode_conv_X_w[i] = get_tensor(string_format(TN_CONV1D, i, "weight"));
                        model.pre_encode_conv_X_b[i] = get_tensor(string_format(TN_CONV1D, i, "bias"));
                    }
                    model.pre_encode_out_w = get_tensor(string_format(TN_PRE_ENCODE_OUT, "weight"));
                    model.pre_encode_out_b = get_tensor(string_format(TN_PRE_ENCODE_OUT, "bias"));
                    model.mm_0_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "bias"));
                    model.mm_3_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 3, "weight"));
                    model.mm_3_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 3, "bias"));
                    for (int il = 0; il < hparams.n_layer; ++il) {
                        auto & layer = model.layers[il];
                        layer.ff_norm_w    = get_tensor(string_format(TN_FFN_NORM,   prefix, il, "weight"));
                        layer.ff_norm_b    = get_tensor(string_format(TN_FFN_NORM,   prefix, il, "bias"));
                        layer.ff_norm_1_w  = get_tensor(string_format(TN_FFN_NORM_1, prefix, il, "weight"));
                        layer.ff_norm_1_b  = get_tensor(string_format(TN_FFN_NORM_1, prefix, il, "bias"));
                        layer.ff_up_1_w    = get_tensor(string_format(TN_FFN_UP_1,   prefix, il, "weight"));
                        layer.ff_up_1_b    = get_tensor(string_format(TN_FFN_UP_1,   prefix, il, "bias"));
                        layer.ff_down_1_w  = get_tensor(string_format(TN_FFN_DOWN_1, prefix, il, "weight"));
                        layer.ff_down_1_b  = get_tensor(string_format(TN_FFN_DOWN_1, prefix, il, "bias"));
                        layer.pos_bias_u   = get_tensor(string_format(TN_POS_BIAS_U, prefix, il));
                        layer.pos_bias_v   = get_tensor(string_format(TN_POS_BIAS_V, prefix, il));
                        layer.norm_conv_w  = get_tensor(string_format(TN_NORM_CONV,  prefix, il, "weight"));
                        layer.norm_conv_b  = get_tensor(string_format(TN_NORM_CONV,  prefix, il, "bias"));
                        layer.linear_pos_w = get_tensor(string_format(TN_LINEAR_POS, prefix, il, "weight"));
                        layer.conv_norm_w  = get_tensor(string_format(TN_CONV_NORM,  prefix, il, "weight"));
                        layer.conv_norm_b  = get_tensor(string_format(TN_CONV_NORM,  prefix, il, "bias"));
                        layer.conv_dw_w    = get_tensor(string_format(TN_CONV_DW,    prefix, il, "weight"));
                        layer.conv_dw_b    = get_tensor(string_format(TN_CONV_DW,    prefix, il, "bias"));
                        layer.conv_pw1_w   = get_tensor(string_format(TN_CONV_PW1,   prefix, il, "weight"));
                        layer.conv_pw1_b   = get_tensor(string_format(TN_CONV_PW1,   prefix, il, "bias"));
                        layer.conv_pw2_w   = get_tensor(string_format(TN_CONV_PW2,   prefix, il, "weight"));
                        layer.conv_pw2_b   = get_tensor(string_format(TN_CONV_PW2,   prefix, il, "bias"));
                    }
                } break;
            default:
                GGML_ASSERT(false && "unknown projector type");
        }

        // load data
        {
            std::vector<uint8_t> read_buf;

            auto fin = std::ifstream(fname, std::ios::binary);
            if (!fin) {
                throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
            }

            // alloc memory and offload data
            ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
            ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
            ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
            for (auto & t : tensors_to_load) {
                ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
                const size_t offset = tensor_offset[t->name];
                fin.seekg(offset, std::ios::beg);
                if (!fin) {
                    throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
                }
                size_t num_bytes = ggml_nbytes(cur);
                if (ggml_backend_buft_is_host(buft)) {
                    // for the CPU and Metal backend, we can read directly into the tensor
                    fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
                } else {
                    // read into a temporary buffer first, then copy to device memory
                    read_buf.resize(num_bytes);
                    fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
                    ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
                }
            }
            fin.close();

            LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
        }
    }

    struct support_info_op {
        ggml_tensor * op;
        // true if the op runs on the accelerated ctx_clip.backend
        bool is_accel = true;
    };

    struct support_info_graph {
        // whether the clip_ctx.backend supports flash attention
        bool fattn = true;
        ggml_tensor * fattn_op = nullptr; // for debugging

        std::vector<support_info_op> ops;
    };
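    // warmup pushes one fake, worst-case-sized input through graph reservation so
    // that the compute buffers are allocated once up front; it also doubles as a
    // probe for backend op support (flash attention in particular), see below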
    static void warmup(clip_ctx & ctx_clip) {
        // create a fake batch
        const auto & hparams = ctx_clip.model.hparams;
        clip_image_f32_batch batch;
        clip_image_f32_ptr img(clip_image_f32_init());
        if (ctx_clip.model.modality == CLIP_MODALITY_VISION) {
            img->nx = hparams.warmup_image_size;
            img->ny = hparams.warmup_image_size;
            LOG_INF("%s: warmup with image size = %d x %d\n", __func__, img->nx, img->ny);
        } else {
            img->nx = hparams.warmup_audio_size;
            img->ny = hparams.n_mel_bins;
            LOG_INF("%s: warmup with audio size = %d\n", __func__, img->nx);
        }
        batch.entries.push_back(std::move(img));
        warmup(ctx_clip, batch);
    }

    static void warmup(clip_ctx & ctx_clip, const clip_image_f32_batch & batch) {
        support_info_graph info;
        if (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_AUTO) {
            // try to enable flash attention to see if it's supported
            ctx_clip.flash_attn_type = CLIP_FLASH_ATTN_TYPE_ENABLED;
            info = alloc_compute_meta(ctx_clip, batch);
            if (!info.fattn && info.fattn_op) {
                auto op = info.fattn_op;
                LOG_WRN("%s: *****************************************************************\n", __func__);
                LOG_WRN("%s: WARNING: flash attention not supported by %s, memory usage will increase\n", __func__, ggml_backend_name(ctx_clip.backend));
                LOG_WRN("%s: op params:\n", __func__);
                static auto print_shape = [](const char * fn, const char * name, ggml_tensor * t) {
                    // ne is int64_t and nb is size_t, so use the matching format specifiers
                    LOG_WRN("%s: %s: type = %s, ne = [%" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 "], nb = [%zu %zu %zu %zu]\n", fn,
                            name, ggml_type_name(t->type),
                            t->ne[0], t->ne[1], t->ne[2], t->ne[3],
                            t->nb[0], t->nb[1], t->nb[2], t->nb[3]);
                };
                print_shape(__func__, " dst", op);
                print_shape(__func__, "src0", op->src[0]);
                print_shape(__func__, "src1", op->src[1]);
                print_shape(__func__, "src2", op->src[2]);
                LOG_WRN("%s: please report this on github as an issue\n", __func__);
                LOG_WRN("%s: *****************************************************************\n", __func__);
                ctx_clip.flash_attn_type = CLIP_FLASH_ATTN_TYPE_DISABLED;
                alloc_compute_meta(ctx_clip, batch);
            }
        } else {
            info = alloc_compute_meta(ctx_clip, batch);
            if (!info.fattn && ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) {
                LOG_WRN("%s: flash attention is not supported by the current backend; falling back to CPU (performance will be degraded)\n", __func__);
            }
        }
        ctx_clip.is_allocated = true; // mark buffers as allocated

        LOG_INF("%s: flash attention is %s\n", __func__,
                (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) ? "enabled" : "disabled");

        // print ops that are not supported by the GPU backend (if there is one)
        if (ctx_clip.backend && ctx_clip.backend != ctx_clip.backend_cpu) {
            std::vector<support_info_op> unsupported_ops;
            for (const auto & op : info.ops) {
                if (!op.is_accel) {
                    unsupported_ops.push_back(op);
                }
            }
            if (!unsupported_ops.empty()) {
                LOG_WRN("%s: *****************************************************************\n", __func__);
                LOG_WRN("%s: WARNING: the CLIP graph uses operators that are not supported by the backend\n", __func__);
                LOG_WRN("%s: the performance will be suboptimal\n", __func__);
                LOG_WRN("%s: list of unsupported ops (backend=%s):\n", __func__, ggml_backend_name(ctx_clip.backend));
                for (const auto & op : unsupported_ops) {
                    LOG_WRN("%s: %16s: type = %s, ne = [%" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 "]\n", __func__,
                            ggml_op_name(op.op->op),
                            ggml_type_name(op.op->type),
                            op.op->ne[0], op.op->ne[1], op.op->ne[2], op.op->ne[3]);
                }
                LOG_WRN("%s: flash attention is %s\n", __func__,
                        (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) ? "enabled" : "disabled");
                LOG_WRN("%s: please report this on github as an issue\n", __func__);
                LOG_WRN("%s: ref: https://github.com/ggml-org/llama.cpp/pull/16837#issuecomment-3461676118\n", __func__);
                LOG_WRN("%s: *****************************************************************\n", __func__);
            }
        }
    }
    static support_info_graph alloc_compute_meta(clip_ctx & ctx_clip, const clip_image_f32_batch & batch) {
        ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());
        ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
        ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);

        for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
            ggml_backend_t backend = ctx_clip.backend_ptrs[i];
            ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
            size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
            if (size > 1) {
                LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                        ggml_backend_buft_name(buft),
                        size / 1024.0 / 1024.0);
            }
        }

        const int n_splits = ggml_backend_sched_get_n_splits(ctx_clip.sched.get());
        const int n_nodes  = ggml_graph_n_nodes(gf);

        LOG_INF("%s: graph splits = %d, nodes = %d\n", __func__, n_splits, n_nodes);

        support_info_graph res {
            /*.fattn    =*/ true,
            /*.fattn_op =*/ nullptr,
            /*.ops      =*/ {},
        };

        // check op support
        for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
            ggml_tensor * node = ggml_graph_node(gf, i);
            res.ops.push_back({node, true});
            if (!ggml_backend_supports_op(ctx_clip.backend, node)) {
                res.ops.back().is_accel = false;
                if (node->op == GGML_OP_FLASH_ATTN_EXT) {
                    res.fattn = false;
                    res.fattn_op = node;
                }
            }
        }

        return res;
    }
    void get_bool(const std::string & key, bool & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_bool(ctx_gguf.get(), i);
    }

    void get_i32(const std::string & key, int & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_i32(ctx_gguf.get(), i);
    }

    void get_u32(const std::string & key, int & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_u32(ctx_gguf.get(), i);
    }

    void get_f32(const std::string & key, float & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_f32(ctx_gguf.get(), i);
    }

    void get_string(const std::string & key, std::string & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
    }

    void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        int n = gguf_get_arr_n(ctx_gguf.get(), i);
        output.resize(n);
        const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
        // use j here to avoid shadowing the key index i above
        for (int j = 0; j < n; ++j) {
            output[j] = values[j];
        }
    }
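    // generates every grid resolution (x*image_size) x (y*image_size) for
    // 1 <= x, y <= max_patches_per_side, skipping only the trivial 1x1 grid;
    // e.g. if image_size were 336 and max_patches_per_side = 3, this would yield
    // 8 candidates: 336x672, 336x1008, 672x336, 672x672, ... (values illustrative)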
    static void set_llava_uhd_res_candidates(clip_model & model, const int max_patches_per_side) {
        auto & hparams = model.hparams;
        for (int x = 1; x <= max_patches_per_side; x++) {
            for (int y = 1; y <= max_patches_per_side; y++) {
                if (x == 1 && y == 1) {
                    continue; // skip the first point
                }
                hparams.image_res_candidates.push_back(clip_image_size{
                    x*hparams.image_size,
                    y*hparams.image_size,
                });
            }
        }
    }
};
struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) {
    clip_ctx * ctx_vision = nullptr;
    clip_ctx * ctx_audio  = nullptr;

    try {
        clip_model_loader loader(fname);

        if (loader.has_vision) {
            ctx_vision = new clip_ctx(ctx_params);
            loader.load_hparams(ctx_vision->model, CLIP_MODALITY_VISION);
            loader.load_tensors(*ctx_vision);
            if (ctx_params.warmup) {
                loader.warmup(*ctx_vision);
            }
            // clip_debug_encode(ctx_vision, 24*14, 24*14, 0.5f);
        }

        if (loader.has_audio) {
            ctx_audio = new clip_ctx(ctx_params);
            loader.load_hparams(ctx_audio->model, CLIP_MODALITY_AUDIO);
            loader.load_tensors(*ctx_audio);
            if (ctx_params.warmup) {
                loader.warmup(*ctx_audio);
            }
        }

    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
        delete ctx_vision;
        delete ctx_audio;
        return {nullptr, nullptr};
    }

    return {ctx_vision, ctx_audio};
}

struct clip_image_size * clip_image_size_init() {
    struct clip_image_size * load_image_size = new struct clip_image_size();
    load_image_size->width  = 448;
    load_image_size->height = 448;
    return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
    return new clip_image_u8();
}

struct clip_image_f32 * clip_image_f32_init() {
    return new clip_image_f32();
}

struct clip_image_f32_batch * clip_image_f32_batch_init() {
    return new clip_image_f32_batch();
}

unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
    if (nx) *nx = img->nx;
    if (ny) *ny = img->ny;
    return img->buf.data();
}

void clip_image_size_free(struct clip_image_size * load_image_size) {
    if (load_image_size == nullptr) {
        return;
    }
    delete load_image_size;
}

void clip_image_u8_free(struct clip_image_u8 * img) { delete img; }
void clip_image_f32_free(struct clip_image_f32 * img) { delete img; }
void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { delete batch; }
void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { delete batch; }

size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
    return batch->entries.size();
}

size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->nx;
}

size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->ny;
}

clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return nullptr;
    }
    return batch->entries[idx].get();
}

void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
    img->nx = nx;
    img->ny = ny;
    img->buf.resize(3 * nx * ny);
    memcpy(img->buf.data(), rgb_pixels, img->buf.size());
}

// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
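// e.g. with mean = std = 0.5 for a channel, a raw byte of 255 maps to
// (255/255 - 0.5) / 0.5 = 1.0 and a raw byte of 0 maps to -1.0
// (the actual per-channel mean/std values come from the model)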
static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(src.buf.size());

    // TODO @ngxson : seems like this could be done more efficiently on cgraph
    for (size_t i = 0; i < src.buf.size(); ++i) {
        int c = i % 3; // rgb
        dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
    }
}

// set of tools to manipulate images
// in the future, we can have HW acceleration by allowing this struct to access 3rd party lib like imagick or opencv
struct img_tool {
    enum resize_algo {
        RESIZE_ALGO_BILINEAR,
        RESIZE_ALGO_BICUBIC,
        // RESIZE_ALGO_LANCZOS, // TODO
    };

    static void resize(
            const clip_image_u8 & src,
            clip_image_u8 & dst,
            const clip_image_size & target_resolution,
            resize_algo algo,
            bool add_padding = true, // TODO: define the behavior for add_padding = false
            std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
        dst.nx = target_resolution.width;
        dst.ny = target_resolution.height;
        dst.buf.resize(3 * dst.nx * dst.ny);

        if (dst.nx == src.nx && dst.ny == src.ny) {
            // no resize needed, simple copy
            dst.buf = src.buf;
            return;
        }

        if (!add_padding) {
            // direct resize
            switch (algo) {
                case RESIZE_ALGO_BILINEAR:
                    resize_bilinear(src, dst, target_resolution.width, target_resolution.height);
                    break;
                case RESIZE_ALGO_BICUBIC:
                    resize_bicubic(src, dst, target_resolution.width, target_resolution.height);
                    break;
                default:
                    throw std::runtime_error("Unsupported resize algorithm");
            }
        } else {
            // resize with padding
            clip_image_u8 resized_image;
            float scale_w = static_cast<float>(target_resolution.width)  / src.nx;
            float scale_h = static_cast<float>(target_resolution.height) / src.ny;
            float scale = std::min(scale_w, scale_h);
            int new_width  = std::min(static_cast<int>(std::ceil(src.nx * scale)), target_resolution.width);
            int new_height = std::min(static_cast<int>(std::ceil(src.ny * scale)), target_resolution.height);

            switch (algo) {
                case RESIZE_ALGO_BILINEAR:
                    resize_bilinear(src, resized_image, new_width, new_height);
                    break;
                case RESIZE_ALGO_BICUBIC:
                    resize_bicubic(src, resized_image, new_width, new_height);
                    break;
                default:
                    throw std::runtime_error("Unsupported resize algorithm");
            }

            // fill dst with pad_color
            fill(dst, pad_color);

            int offset_x = (target_resolution.width  - new_width)  / 2;
            int offset_y = (target_resolution.height - new_height) / 2;

            composite(dst, resized_image, offset_x, offset_y);
        }
    }

    static void crop(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
        dst.nx = w;
        dst.ny = h;
        dst.buf.resize(3 * w * h);

        for (int i = 0; i < h; ++i) {
            for (int j = 0; j < w; ++j) {
                int src_idx = 3 * ((y + i)*image.nx + (x + j));
                int dst_idx = 3 * (i*w + j);
                dst.buf[dst_idx]     = image.buf[src_idx];
                dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
            }
        }
    }

    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size will be aligned to the nearest multiple of align_size
    // if H or W size is larger than longest_edge, it will be resized to longest_edge
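    // worked example (numbers illustrative): inp_size = 1000x700, align_size = 14,
    // longest_edge = 448: scale = min(448/1000, 448/700) = 0.448, giving
    // 448.0 x 313.6, which aligns up to 448 x 322 (both multiples of 14)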
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int longest_edge) {
        GGML_ASSERT(align_size > 0);
        if (inp_size.width <= 0 || inp_size.height <= 0 || longest_edge <= 0) {
            return {0, 0};
        }

        float scale = std::min(static_cast<float>(longest_edge) / inp_size.width,
                               static_cast<float>(longest_edge) / inp_size.height);

        float target_width_f  = static_cast<float>(inp_size.width)  * scale;
        float target_height_f = static_cast<float>(inp_size.height) * scale;

        auto ceil_by_factor = [f = align_size](float x) { return static_cast<int>(std::ceil(x / static_cast<float>(f))) * f; };
        int aligned_width  = ceil_by_factor(target_width_f);
        int aligned_height = ceil_by_factor(target_height_f);

        return {aligned_width, aligned_height};
    }

    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size will have min_pixels <= W*H <= max_pixels
    // this is referred to as "smart_resize" in the transformers code
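    // worked example (numbers illustrative): inp_size = 1000x1000, align_size = 28,
    // max_pixels = 250000: rounding gives 1008x1008 > max_pixels, so
    // beta = sqrt(1000*1000/250000) = 2 and each side becomes
    // floor((1000/2)/28)*28 = 476, with 476*476 <= max_pixels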
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int min_pixels, const int max_pixels) {
        GGML_ASSERT(align_size > 0);
        const int width  = inp_size.width;
        const int height = inp_size.height;

        auto round_by_factor = [f = align_size](float x) { return static_cast<int>(std::round(x / static_cast<float>(f))) * f; };
        auto ceil_by_factor  = [f = align_size](float x) { return static_cast<int>(std::ceil(x / static_cast<float>(f))) * f; };
        auto floor_by_factor = [f = align_size](float x) { return static_cast<int>(std::floor(x / static_cast<float>(f))) * f; };

        // always align up first
        int h_bar = std::max(align_size, round_by_factor(height));
        int w_bar = std::max(align_size, round_by_factor(width));

        if (h_bar * w_bar > max_pixels) {
            const auto beta = std::sqrt(static_cast<float>(height * width) / max_pixels);
            h_bar = std::max(align_size, floor_by_factor(height / beta));
            w_bar = std::max(align_size, floor_by_factor(width  / beta));
        } else if (h_bar * w_bar < min_pixels) {
            const auto beta = std::sqrt(static_cast<float>(min_pixels) / (height * width));
            h_bar = ceil_by_factor(height * beta);
            w_bar = ceil_by_factor(width  * beta);
        }

        return {w_bar, h_bar};
    }

    // draw src image into dst image at offset (offset_x, offset_y)
    static void composite(clip_image_u8 & dst, const clip_image_u8 & src, int offset_x, int offset_y) {
        for (int y = 0; y < src.ny; ++y) {
            for (int x = 0; x < src.nx; ++x) {
                int dx = x + offset_x;
                int dy = y + offset_y;
                // skip pixels that would be out of bounds in the destination
                if (dx < 0 || dy < 0 || dx >= dst.nx || dy >= dst.ny) {
                    continue;
                }
                size_t dst_idx = 3 * (static_cast<size_t>(dy) * dst.nx + static_cast<size_t>(dx));
                size_t src_idx = 3 * (static_cast<size_t>(y)  * src.nx + static_cast<size_t>(x));
                dst.buf[dst_idx + 0] = src.buf[src_idx + 0];
                dst.buf[dst_idx + 1] = src.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = src.buf[src_idx + 2];
            }
        }
    }

    // fill the image with a solid color
    static void fill(clip_image_u8 & img, const std::array<uint8_t, 3> & color) {
        for (size_t i = 0; i < img.buf.size(); i += 3) {
            img.buf[i]     = color[0];
            img.buf[i + 1] = color[1];
            img.buf[i + 2] = color[2];
        }
    }

private:
    // Bilinear resize function
    static void resize_bilinear(const clip_image_u8 & src, clip_image_u8 & dst, int target_width, int target_height) {
        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float x_ratio = static_cast<float>(src.nx - 1) / target_width;
        float y_ratio = static_cast<float>(src.ny - 1) / target_height;

        for (int y = 0; y < target_height; y++) {
            for (int x = 0; x < target_width; x++) {
                float px = x_ratio * x;
                float py = y_ratio * y;
                int x_floor = static_cast<int>(px);
                int y_floor = static_cast<int>(py);
                float x_lerp = px - x_floor;
                float y_lerp = py - y_floor;

                for (int c = 0; c < 3; c++) {
                    float top = lerp(
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    float bottom = lerp(
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
                }
            }
        }
    }

    // Bicubic resize function
    // part of the image will be cropped if the aspect ratio is different
    static bool resize_bicubic(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
        const int nx = img.nx;
        const int ny = img.ny;

        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float Cc;
        float C[5] = {};
        float d0, d2, d3, a0, a1, a2, a3;
        int i, j, k, jj;
        int x, y;
        float dx, dy;
        float tx, ty;

        tx = (float)nx / (float)target_width;
        ty = (float)ny / (float)target_height;

        // Bicubic interpolation; adapted from ViT.cpp, inspired from :
        //  -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
        //  -> https://en.wikipedia.org/wiki/Bicubic_interpolation
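        // the weights below follow the standard cubic convolution form
        //   f(t) = a0 + a1*t + a2*t^2 + a3*t^3
        // built from the finite differences d0 = f(-1)-f(0), d2 = f(1)-f(0),
        // d3 = f(2)-f(0); the horizontal pass fills C[jj], and the vertical pass
        // runs on every jj iteration, with only the final one (C fully populated)
        // determining the value that ends up stored in dst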
        for (i = 0; i < target_height; i++) {
            for (j = 0; j < target_width; j++) {
                x = (int)(tx * j);
                y = (int)(ty * i);
                dx = tx * j - x;
                dy = ty * i - y;

                for (k = 0; k < 3; k++) {
                    for (jj = 0; jj <= 3; jj++) {
                        d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];

                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                        C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;

                        d0 = C[0] - C[1];
                        d2 = C[2] - C[1];
                        d3 = C[3] - C[1];
                        a0 = C[1];
                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
                        Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;

                        const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
                        dst.buf[(i * target_width + j) * 3 + k] = Cc2;
                    }
                }
            }
        }

        return true;
    }

    static inline int clip(int x, int lower, int upper) {
        return std::max(lower, std::min(x, upper));
    }

    // Linear interpolation between two points
    static inline float lerp(float s, float e, float t) {
        return s + (e - s) * t;
    }
};
/**
 * implementation of LLaVA-UHD:
 *  - https://arxiv.org/pdf/2403.11703
 *  - https://github.com/thunlp/LLaVA-UHD
 *  - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
 *
 * overview:
 *   - an image always has a single overview (downscaled image)
 *   - an image can have 0 or more slices, depending on the image size
 *   - each slice can then be considered as a separate image
 *
 * for example:
 *
 * [overview] --> [slice 1] --> [slice 2]
 *                    |             |
 *                    +--> [slice 3] --> [slice 4]
 */
struct llava_uhd {
    struct slice_coordinates {
        int x;
        int y;
        clip_image_size size;
    };

    struct slice_instructions {
        clip_image_size overview_size; // size of downscaled image
        clip_image_size refined_size;  // size of image right before slicing (must be multiple of slice size)
        clip_image_size grid_size;     // grid_size.width * grid_size.height = number of slices
        std::vector<slice_coordinates> slices;

        img_tool::resize_algo interpolation_overview = img_tool::RESIZE_ALGO_BILINEAR;
        bool padding_overview = false; // if true, the overview image will be padded to the overview size
        std::array<uint8_t, 3> pad_color_overview = {0, 0, 0};

        img_tool::resize_algo interpolation_refined = img_tool::RESIZE_ALGO_BICUBIC;
        bool padding_refined = false;  // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
        std::array<uint8_t, 3> pad_color_refined = {0, 0, 0};
    };
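    // typical flow: get_slice_instructions() computes the overview/refined sizes
    // and the slice coordinates for an input image, then slice_image() materializes
    // them; the first output entry is always the downscaled overview, followed by
    // zero or more slices cropped out of the refined image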
    static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
        slice_instructions res;
        const int patch_size      = clip_get_patch_size(ctx);
        const int slice_size      = clip_get_image_size(ctx);
        const int original_width  = original_size.width;
        const int original_height = original_size.height;

        const bool has_slices    = original_size.width > slice_size || original_size.height > slice_size;
        const bool has_pinpoints = !ctx->model.hparams.image_res_candidates.empty();

        if (!has_slices) {
            // skip slicing logic
            res.overview_size = clip_image_size{slice_size, slice_size};
            res.refined_size  = clip_image_size{0, 0};
            res.grid_size     = clip_image_size{0, 0};
            return res;
        }

        if (has_pinpoints) {
            // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
            auto refine_size = llava_uhd::select_best_resolution(
                original_size,
                ctx->model.hparams.image_res_candidates);
            res.overview_size   = clip_image_size{slice_size, slice_size};
            res.refined_size    = refine_size;
            res.grid_size       = clip_image_size{0, 0};
            res.padding_refined = true;
            res.interpolation_refined = img_tool::RESIZE_ALGO_BILINEAR; // preserve old behavior when padding

            LOG_DBG("%s: using pinpoints for slicing\n", __func__);
            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n",
                    __func__, original_width, original_height,
                    res.overview_size.width, res.overview_size.height,
                    res.refined_size.width,  res.refined_size.height);

            for (int y = 0; y < refine_size.height; y += slice_size) {
                for (int x = 0; x < refine_size.width; x += slice_size) {
                    slice_coordinates slice;
                    slice.x = x;
                    slice.y = y;
                    slice.size.width  = std::min(slice_size, refine_size.width  - x);
                    slice.size.height = std::min(slice_size, refine_size.height - y);
                    res.slices.push_back(slice);
                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
                            __func__, (int)res.slices.size() - 1,
                            slice.x, slice.y, slice.size.width, slice.size.height);
                }
            }

            res.grid_size.height = refine_size.height / slice_size;
            res.grid_size.width  = refine_size.width  / slice_size;
            LOG_DBG("%s: grid size: %d x %d\n", __func__, res.grid_size.width, res.grid_size.height);

            return res;
        }

        // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)

        auto best_size    = get_best_resize(original_size, slice_size, patch_size, !has_slices);
        res.overview_size = best_size;

        {
            const int max_slice_nums = 9; // TODO: this is only used by minicpmv, maybe remove it
            const float log_ratio = log((float)original_width / original_height);
            const float ratio     = (float)original_width * original_height / (slice_size * slice_size);
            const int multiple    = fmin(ceil(ratio), max_slice_nums);

            auto best_grid   = get_best_grid(max_slice_nums, multiple, log_ratio);
            auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
            res.grid_size    = best_grid;
            res.refined_size = refine_size;

            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d, grid size: %d x %d\n",
                    __func__, original_width, original_height,
                    res.overview_size.width, res.overview_size.height,
                    res.refined_size.width,  res.refined_size.height,
                    res.grid_size.width,     res.grid_size.height);

            int width  = refine_size.width;
            int height = refine_size.height;
            int grid_x = int(width  / best_grid.width);
            int grid_y = int(height / best_grid.height);

            for (int patches_y = 0, ic = 0;
                 patches_y < refine_size.height && ic < best_grid.height;
                 patches_y += grid_y, ic += 1) {
                for (int patches_x = 0, jc = 0;
                     patches_x < refine_size.width && jc < best_grid.width;
                     patches_x += grid_x, jc += 1) {
                    slice_coordinates slice;
                    slice.x = patches_x;
                    slice.y = patches_y;
                    slice.size.width  = grid_x;
                    slice.size.height = grid_y;
                    res.slices.push_back(slice);
                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
                            __func__, (int)res.slices.size() - 1,
                            slice.x, slice.y, slice.size.width, slice.size.height);
                }
            }
        }

        return res;
    }
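
    // Worked example of the dynamic (no-pinpoints) path above, assuming
    // slice_size = 448 and patch_size = 14 (illustrative values only):
    // an 800 x 1200 input gives ratio ~= 4.78 -> multiple = 5, and
    // log_ratio = log(800/1200) matches grid (2, 3) exactly, so best_grid = 2 x 3.
    // get_refine_size() then yields 896 x 1344 (six 448 x 448 slices), while the
    // overview is the aspect-preserving downscale 364 x 546, rounded to patch multiples.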
    static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
        std::vector<clip_image_u8_ptr> output;

        // resize to overview size
        clip_image_u8_ptr resized_img(clip_image_u8_init());
        img_tool::resize(*img, *resized_img, inst.overview_size, inst.interpolation_overview,
                         inst.padding_overview, inst.pad_color_overview);
        output.push_back(std::move(resized_img));
        if (inst.slices.empty()) {
            // no slices, just return the resized image
            return output;
        }

        // resize to refined size
        clip_image_u8_ptr refined_img(clip_image_u8_init());
        img_tool::resize(*img, *refined_img, inst.refined_size, inst.interpolation_refined,
                         inst.padding_refined, inst.pad_color_refined);

        // create slices
        for (const auto & slice : inst.slices) {
            int x = slice.x;
            int y = slice.y;
            int w = slice.size.width;
            int h = slice.size.height;

            clip_image_u8_ptr img_slice(clip_image_u8_init());
            img_tool::crop(*refined_img, *img_slice, x, y, w, h);
            output.push_back(std::move(img_slice));
        }

        return output;
    }
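
    // Typical call sequence (sketch; this mirrors how clip_image_preprocess
    // uses the two functions above):
    //   auto inst = llava_uhd::get_slice_instructions(ctx, {img->nx, img->ny});
    //   auto imgs = llava_uhd::slice_image(img, inst);
    //   // imgs[0] is the overview image, imgs[1..] are the slices (if any)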
private:
    static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
            float r = static_cast<float>(width) / height;
            height  = static_cast<int>(scale_resolution / std::sqrt(r));
            width   = static_cast<int>(height * r);
        }
        clip_image_size res;
        res.width  = ensure_divide(width,  patch_size);
        res.height = ensure_divide(height, patch_size);
        return res;
    }

    static clip_image_size resize_maintain_aspect_ratio(const clip_image_size & orig, const clip_image_size & target_max) {
        float scale_width  = static_cast<float>(target_max.width)  / orig.width;
        float scale_height = static_cast<float>(target_max.height) / orig.height;
        float scale = std::min(scale_width, scale_height);
        return clip_image_size{
            static_cast<int>(orig.width  * scale),
            static_cast<int>(orig.height * scale),
        };
    }
    /**
     * Selects the best resolution from a list of possible resolutions based on the original size.
     * The candidate that preserves the most of the original image (highest effective resolution)
     * wins; ties are broken by the least wasted area.
     *
     * For example, when given a list of resolutions:
     *  - 100x100
     *  - 200x100
     *  - 100x200
     *  - 200x200
     *
     * And an input image of size 111x200, then 200x200 is the best fit: the image fits into it
     * without downscaling (effective resolution 111x200 = 22200), which no smaller candidate
     * can match (100x200 only reaches 100x180 = 18000).
     *
     * @param original_size The original size of the image
     * @param possible_resolutions A list of possible resolutions
     * @return The best fit resolution
     */
    static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
        clip_image_size best_fit;
        int min_wasted_area          = std::numeric_limits<int>::max();
        int max_effective_resolution = 0;

        for (const clip_image_size & candidate : possible_resolutions) {
            auto target_size = resize_maintain_aspect_ratio(original_size, candidate);
            int effective_resolution = std::min(
                target_size.width * target_size.height,
                original_size.width * original_size.height);
            int wasted_area = (candidate.width * candidate.height) - effective_resolution;

            if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_area < min_wasted_area)) {
                max_effective_resolution = effective_resolution;
                min_wasted_area          = wasted_area;
                best_fit                 = candidate;
            }

            LOG_DBG("%s: candidate: %d x %d, target: %d x %d, wasted: %d, effective: %d\n", __func__, candidate.width, candidate.height, target_size.width, target_size.height, wasted_area, effective_resolution);
        }

        return best_fit;
    }
    static int ensure_divide(int length, int patch_size) {
        return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
    }
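
    // e.g. ensure_divide(365, 14) = round(365 / 14) * 14 = 26 * 14 = 364;
    // the result is clamped to at least one patch_size.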
    static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        int grid_x = grid.width;
        int grid_y = grid.height;

        int refine_width  = ensure_divide(width,  grid_x);
        int refine_height = ensure_divide(height, grid_y);

        clip_image_size grid_size;
        grid_size.width  = refine_width  / grid_x;
        grid_size.height = refine_height / grid_y;

        auto best_grid_size  = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
        int best_grid_width  = best_grid_size.width;
        int best_grid_height = best_grid_size.height;

        clip_image_size refine_size;
        refine_size.width  = best_grid_width  * grid_x;
        refine_size.height = best_grid_height * grid_y;
        return refine_size;
    }
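
    // e.g. an 800 x 1200 original with a 2 x 3 grid: each grid cell is 400 x 400,
    // best-resized (with upscale) to 448 x 448, so the refined size is 896 x 1344
    // (numbers assume scale_resolution = 448 and patch_size = 14).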
    static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
        std::vector<int> candidate_split_grids_nums;
        for (int i : {multiple - 1, multiple, multiple + 1}) {
            if (i == 1 || i > max_slice_nums) {
                continue;
            }
            candidate_split_grids_nums.push_back(i);
        }

        std::vector<clip_image_size> candidate_grids;
        for (int split_grids_nums : candidate_split_grids_nums) {
            int m = 1;
            while (m <= split_grids_nums) {
                if (split_grids_nums % m == 0) {
                    candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
                }
                ++m;
            }
        }

        clip_image_size best_grid{1, 1};
        float min_error = std::numeric_limits<float>::infinity();
        for (const auto & grid : candidate_grids) {
            float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
            if (error < min_error) {
                best_grid = grid;
                min_error = error;
            }
        }
        return best_grid;
    }
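
    // e.g. max_slice_nums = 9, multiple = 5, log_ratio = log(2/3): the candidate
    // counts {4, 5, 6} expand to factor pairs such as (2, 2), (1, 5), (2, 3), ...;
    // (2, 3) matches the target aspect ratio exactly and is returned.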
};
// returns the normalized float tensor for llava-1.5; for spatial_unpad with anyres processing for llava-1.6, it returns the normalized image patch tensors as a vector
// res_imgs memory is allocated here; previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
    clip_image_size original_size{img->nx, img->ny};

    auto & params = ctx->model.hparams;

    switch (ctx->proj_type()) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                for (size_t i = 0; i < imgs.size(); ++i) {
                    // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = inst.grid_size.width;
                res_imgs->grid_y = inst.grid_size.height;
            } break;
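
        // note: for the slicing paths in this switch (minicpmv, idefics3, llama4,
        // llava-1.6), entries[0] is the overview image and the remaining entries
        // are the slices, matching the output order of llava_uhd::slice_image()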
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_QWEN3VL:
        case PROJECTOR_TYPE_GLM4V:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                clip_image_u8 resized;
                const clip_image_size new_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * 2,
                    params.image_min_pixels,
                    params.image_max_pixels);
                img_tool::resize(*img, resized, new_size, img_tool::RESIZE_ALGO_BILINEAR, false);
                // clip_image_save_to_bmp(resized, "preproc.bmp");

                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_IDEFICS3:
            {
                // The refined size has two steps:
                // 1. Resize w/ aspect-ratio preserving such that the longer side is
                //    the preprocessor longest size
                // 2. Resize w/out preserving aspect ratio such that both sides are
                //    multiples of image_size (always rounding up)
                //
                // CITE: https://github.com/huggingface/transformers/blob/main/src/transformers/models/idefics3/image_processing_idefics3.py#L737
                const clip_image_size refined_size = img_tool::calc_size_preserved_ratio(
                    original_size, params.image_size, params.image_longest_edge);
                // LOG_INF("%s: original size: %d x %d, refined size: %d x %d\n",
                //         __func__, original_size.width, original_size.height,
                //         refined_size.width, refined_size.height);

                llava_uhd::slice_instructions instructions;
                instructions.overview_size = clip_image_size{params.image_size, params.image_size};
                instructions.refined_size  = refined_size;
                instructions.grid_size     = clip_image_size{
                    static_cast<int>(std::ceil(static_cast<float>(refined_size.width)  / params.image_size)),
                    static_cast<int>(std::ceil(static_cast<float>(refined_size.height) / params.image_size)),
                };

                for (int y = 0; y < refined_size.height; y += params.image_size) {
                    for (int x = 0; x < refined_size.width; x += params.image_size) {
                        // LOG_INF("%s: adding slice at x=%d, y=%d\n", __func__, x, y);
                        instructions.slices.push_back(llava_uhd::slice_coordinates{
                            /* x    */ x,
                            /* y    */ y,
                            /* size */ clip_image_size{
                                std::min(params.image_size, refined_size.width  - x),
                                std::min(params.image_size, refined_size.height - y)
                            }
                        });
                    }
                }
                auto imgs = llava_uhd::slice_image(img, instructions);

                // cast and normalize to f32
                for (size_t i = 0; i < imgs.size(); ++i) {
                    // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = instructions.grid_size.width;
                res_imgs->grid_y = instructions.grid_size.height;
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_INTERNVL: // TODO @ngxson : support dynamic resolution
            {
                clip_image_u8 resized_image;
                int sz = params.image_size;
                img_tool::resize(*img, resized_image, {sz, sz}, img_tool::RESIZE_ALGO_BILINEAR);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                // clip_image_save_to_bmp(resized_image, "resized.bmp");
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_JANUS_PRO:
            {
                // Janus Pro preprocessing: pad to square with gray(127), resize to 384x384
                const std::array<uint8_t, 3> pad_color = {127, 127, 127};
                clip_image_u8 resized_image;
                int sz = params.image_size;
                img_tool::resize(*img, resized_image, {sz, sz}, img_tool::RESIZE_ALGO_BILINEAR, true, pad_color);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                clip_image_u8 resized_image;
                // the original pixtral model doesn't have n_merge
                const int cur_merge = params.n_merge == 0 ? 1 : params.n_merge;
                const clip_image_size target_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * cur_merge,
                    params.image_min_pixels,
                    params.image_max_pixels);
                img_tool::resize(*img, resized_image, target_size, img_tool::RESIZE_ALGO_BILINEAR);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                GGML_ASSERT(!params.image_res_candidates.empty());
                auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                for (size_t i = 0; i < imgs.size(); ++i) {
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = inst.grid_size.width;
                res_imgs->grid_y = inst.grid_size.height;
            } break;
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                const clip_image_size target_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * params.n_merge,
                    params.image_min_pixels,
                    params.image_max_pixels);
                const std::array<uint8_t, 3> pad_color = {122, 116, 104};

                clip_image_u8 resized_img;
                const bool pad = (ctx->proj_type() != PROJECTOR_TYPE_LFM2);
                img_tool::resize(*img, resized_img, target_size, img_tool::RESIZE_ALGO_BILINEAR, pad, pad_color);

                clip_image_f32_ptr res(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_img, *res, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(res));
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_COGVLM: // TODO @ngxson : is this correct for cogvlm?
            {
                // TODO @ngxson : refactor the code below to avoid duplicated logic
                // The logic below pads the shorter side to the longer side with a background color: rgb(122, 116, 104)
                // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
                clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily

                // The model config contains all we need to decide how to preprocess;
                // we automatically switch to the newer llava-1.6 preprocessing when pinpoints are present
                if (params.image_res_candidates.empty()) { // pad_to_square
                    // for llava-1.5, we resize the image to a square, padding the shorter side with a background color
                    const int longer_side = std::max(img->nx, img->ny);
                    temp->nx = longer_side;
                    temp->ny = longer_side;
                    temp->buf.resize(3 * longer_side * longer_side);

                    // background color in RGB from LLaVA (this is the mean rgb color * 255)
                    const std::array<uint8_t, 3> pad_color = {122, 116, 104};

                    // resize the image to the target_size
                    img_tool::resize(*img, *temp, clip_image_size{params.image_size, params.image_size}, img_tool::RESIZE_ALGO_BILINEAR, true, pad_color);

                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*temp, *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                } else {
                    // "spatial_unpad" with "anyres" processing for llava-1.6
                    auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                    std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                    for (size_t i = 0; i < imgs.size(); ++i) {
                        // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                        clip_image_f32_ptr res(clip_image_f32_init());
                        normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                        res_imgs->entries.push_back(std::move(res));
                    }
                }
            } break;
        default:
            LOG_ERR("%s: unsupported projector type %d\n", __func__, ctx->proj_type());
            return false;
    }

    return true;
}
ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
    return ctx->model.image_newline;
}

void clip_free(clip_ctx * ctx) {
    if (ctx == nullptr) {
        return;
    }
    delete ctx;
}

// deprecated
size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
    const int32_t nx = ctx->model.hparams.image_size;
    const int32_t ny = ctx->model.hparams.image_size;
    return clip_embd_nbytes_by_img(ctx, nx, ny);
}

size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h) {
    clip_image_f32 img;
    img.nx = img_w;
    img.ny = img_h;
    return clip_n_output_tokens(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
}
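
// Illustrative cost (hypothetical numbers, not tied to a specific model): 256
// output tokens with a 4096-dim projected embedding need 256 * 4096 * 4 bytes
// = 4 MiB of float storage per image.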
int32_t clip_get_image_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.image_size;
}

int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.patch_size;
}

int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.n_embd;
}

const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
    return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
}
int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;
    const int n_total = clip_n_output_tokens(ctx, img);
    const auto & proj = ctx->proj_type();
    switch (proj) {
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_QWEN3VL:
        case PROJECTOR_TYPE_GLM4V:
            return (img->nx / params.patch_size) / 2;
        default:
            break;
    }
    return n_total;
}

int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;
    const auto & proj = ctx->proj_type();
    switch (proj) {
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_QWEN3VL:
        case PROJECTOR_TYPE_GLM4V:
            return (img->ny / params.patch_size) / 2;
        default:
            break;
    }
    return 1;
}
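
// For the M-RoPE projectors handled above (qwen2/2.5/3-vl, glm4v), the output
// tokens form an x-by-y grid; for all other projectors the layout is flat:
// clip_n_output_tokens_x() returns the total token count and
// clip_n_output_tokens_y() returns 1.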
int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;

    // for models with fixed size image, the input image is already pre-processed and resized to square
    int patch_size = params.patch_size;
    int n_patches  = (img->nx / patch_size) * (img->ny / patch_size);

    projector_type proj = ctx->proj_type();

    switch (proj) {
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_JANUS_PRO:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                n_patches /= 4;
                if (ctx->model.mm_boi) {
                    n_patches += 2; // for BOI and EOI token embeddings
                }
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                // use the actual config value if available, otherwise fall back to hardcoded values
                if (params.minicpmv_query_num > 0) {
                    n_patches = params.minicpmv_query_num;
                } else {
                    // fallback to hardcoded values for legacy models
                    if (params.minicpmv_version == 2) {
                        n_patches = 96;
                    } else if (params.minicpmv_version == 3) {
                        n_patches = 64;
                    } else if (params.minicpmv_version == 4) {
                        n_patches = 64;
                    } else if (params.minicpmv_version == 5) {
                        // MiniCPM-V 4.0
                        n_patches = 64;
                    } else if (params.minicpmv_version == 6) {
                        // MiniCPM-V 4.5
                        n_patches = 64;
                    } else {
                        GGML_ABORT("Unknown minicpmv version");
                    }
                }
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_QWEN3VL:
        case PROJECTOR_TYPE_GLM4V:
            {
                // dynamic size (2 conv, so double patch size)
                int x_patch = img->nx / (params.patch_size * 2);
                int y_patch = img->ny / (params.patch_size * 2);
                n_patches = x_patch * y_patch;
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_LLAMA4:
            {
                // both X and Y are downscaled by the scale factor
                int scale_factor = ctx->model.hparams.n_merge;
                n_patches /= (scale_factor * scale_factor);
            } break;
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            {
                // dynamic size
                int out_patch_size = params.patch_size * ctx->model.hparams.n_merge;
                int x_patch = CLIP_ALIGN(img->nx, out_patch_size) / out_patch_size;
                int y_patch = CLIP_ALIGN(img->ny, out_patch_size) / out_patch_size;
                n_patches = x_patch * y_patch;
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                // dynamic size
                int n_merge     = ctx->model.hparams.n_merge;
                int n_patches_x = img->nx / patch_size / (n_merge > 0 ? n_merge : 1);
                int n_patches_y = img->ny / patch_size / (n_merge > 0 ? n_merge : 1);
                if (ctx->model.token_embd_img_break) {
                    n_patches = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
                } else {
                    n_patches = n_patches_y * n_patches_x;
                }
            } break;
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_QWEN2A:
            {
                n_patches = img->nx;

                const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
                if (ctx->model.audio_has_stack_frames()) {
                    GGML_ASSERT(proj_stack_factor > 0);
                    const int n_len = CLIP_ALIGN(n_patches, proj_stack_factor);
                    n_patches = n_len / proj_stack_factor;
                }

                // whisper downscales input tokens by half after conv1d
                n_patches /= 2;

                if (ctx->model.audio_has_avgpool()) {
                    // divide by 2 because of nn.AvgPool1d(2, stride=2)
                    n_patches /= 2;
                }
            } break;
        case PROJECTOR_TYPE_GLMA:
            {
                n_patches = img->nx;
                // whisper downscales input tokens by half after conv1d
                n_patches /= 2;
                // reshape by merge_factor
                n_patches /= ctx->model.hparams.proj_stack_factor;
                // for BOI and EOI token embeddings
                n_patches += 2;
            } break;
        case PROJECTOR_TYPE_COGVLM:
            {
                n_patches += 2; // for BOI and EOI token embeddings
            } break;
        case PROJECTOR_TYPE_LFM2A:
            {
                n_patches = ((((img->nx + 1) / 2) + 1) / 2 + 1) / 2;
            } break;
        default:
            GGML_ABORT("unsupported projector type");
    }

    return n_patches;
}
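
// Worked example for the qwen-style branch above (illustrative numbers): a
// 448 x 448 input with patch_size = 14 yields (448/28) * (448/28) = 16 * 16 =
// 256 tokens; a pixtral-style 16 x 16 grid with [IMG_BREAK] tokens would
// instead count 16*16 + 15 = 271 (one break token per row except the last).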
bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    clip_image_f32_batch imgs;
    clip_image_f32_ptr img_copy(clip_image_f32_init());
    *img_copy = *img;
    imgs.entries.push_back(std::move(img_copy));

    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}
bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
    const clip_image_f32_batch & imgs = *imgs_c_ptr;
    int batch_size = imgs.entries.size();

    // TODO @ngxson : implement batch size > 1 as a loop
    // we don't need true batching support because the cgraph is going to be big anyway
    if (batch_size != 1) {
        return false; // only support batch size of 1
    }

    // if buffers are not allocated, we need to do a warmup run to allocate them
    if (!ctx->is_allocated) {
        clip_model_loader::warmup(*ctx, *imgs_c_ptr);
    }

    // build the inference graph
    ctx->debug_print_tensors.clear();
    ggml_backend_sched_reset(ctx->sched.get());
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
    ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);

    // set inputs
    const auto & model   = ctx->model;
    const auto & hparams = model.hparams;

    const int image_size_width  = imgs.entries[0]->nx;
    const int image_size_height = imgs.entries[0]->ny;

    const int patch_size  = hparams.patch_size;
    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int n_pos       = num_patches + (model.class_embedding ? 1 : 0);
    const int pos_w       = image_size_width  / patch_size;
    const int pos_h       = image_size_height / patch_size;

    const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl

    auto get_inp_tensor = [&gf](const char * name) {
        ggml_tensor * inp = ggml_graph_get_tensor(gf, name);
        if (inp == nullptr) {
            GGML_ABORT("Failed to get tensor %s", name);
        }
        if (!(inp->flags & GGML_TENSOR_FLAG_INPUT)) {
            GGML_ABORT("Tensor %s is not an input tensor", name);
        }
        return inp;
    };
    auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_F32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_I32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };
    // set input pixel values
    if (!imgs.is_audio) {
        size_t nelem = 0;
        for (const auto & img : imgs.entries) {
            nelem += img->nx * img->ny * 3;
        }
        std::vector<float> inp_raw(nelem);

        // layout of data (note: the channel dim is unrolled to better visualize the layout):
        //
        // ┌──W──┐
        // │  H  │  channel = R
        // ├─────┤ │
        // │  H  │  channel = G
        // ├─────┤ │
        // │  H  │  channel = B
        // └─────┘ │
        //   ──────┘ x B
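        //
        // i.e. for each image: inp_raw[c*nx*ny + y*nx + x] = buf[3*(y*nx + x) + c], c in {0, 1, 2}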
        for (size_t i = 0; i < imgs.entries.size(); i++) {
            const int nx = imgs.entries[i]->nx;
            const int ny = imgs.entries[i]->ny;
            const int n  = nx * ny;

            for (int b = 0; b < batch_size; b++) {
                float * batch_entry = inp_raw.data() + b * (3*n);
                for (int y = 0; y < ny; y++) {
                    for (int x = 0; x < nx; x++) {
                        size_t base_src = 3*(y * nx + x); // idx of the first channel
                        size_t base_dst =    y * nx + x;  // idx within one channel plane
                        batch_entry[      base_dst] = imgs.entries[b]->buf[base_src    ];
                        batch_entry[1*n + base_dst] = imgs.entries[b]->buf[base_src + 1];
                        batch_entry[2*n + base_dst] = imgs.entries[b]->buf[base_src + 2];
                    }
                }
            }
        }
        set_input_f32("inp_raw", inp_raw);
    } else {
        // audio input
        GGML_ASSERT(imgs.entries.size() == 1);
        const auto & mel_inp = imgs.entries[0];
        const int n_step = mel_inp->nx;
        const int n_mel  = mel_inp->ny;
        std::vector<float> inp_raw(n_step * n_mel);
        std::memcpy(inp_raw.data(), mel_inp->buf.data(), n_step * n_mel * sizeof(float));
        set_input_f32("inp_raw", inp_raw);
    }
    // set input per projector
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                // inspired from siglip:
                //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
                //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
                std::vector<int32_t> positions(pos_h * pos_w);
                int bucket_coords_h[1024];
                int bucket_coords_w[1024];
                for (int i = 0; i < pos_h; i++) {
                    bucket_coords_h[i] = std::floor(70.0*i/pos_h);
                }
                for (int i = 0; i < pos_w; i++) {
                    bucket_coords_w[i] = std::floor(70.0*i/pos_w);
                }
                for (int i = 0, id = 0; i < pos_h; i++) {
                    for (int j = 0; j < pos_w; j++) {
                        positions[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
                    }
                }
                set_input_i32("positions", positions);

                // inputs for resampler projector
                // set the 2D positions (using float for sinusoidal embedding)
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<float> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = static_cast<float>(i / n_patches_per_col);
                }
                set_input_f32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = static_cast<float>(i % n_patches_per_col);
                }
                set_input_f32("pos_w", pos_data);

                // base frequency omega
                const float base_freq = 10000.0f;
                const int n_embd_proj = clip_n_mmproj_embd(ctx);
                std::vector<float> omega(n_embd_proj / 4);
                for (int i = 0; i < n_embd_proj / 4; ++i) {
                    omega[i] = 1.0f / std::pow(base_freq, static_cast<float>(i) / (n_embd_proj / 4));
                }
                set_input_f32("omega", omega);
            } break;
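
        // e.g. with pos_w = pos_h = 32, the position id of patch (i, j) is
        // floor(70*i/32) * 70 + floor(70*j/32), i.e. patch coordinates are
        // interpolated into the fixed 70 x 70 position-embedding grid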
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN3VL:
        case PROJECTOR_TYPE_GLM4V:
            {
                const int merge_ratio = hparams.n_merge;
                const int pw = image_size_width  / patch_size;
                const int ph = image_size_height / patch_size;
                std::vector<int> positions(n_pos * 4);
                int ptr = 0;
                for (int y = 0; y < ph; y += merge_ratio) {
                    for (int x = 0; x < pw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                positions[                  ptr] = y + dy;
                                positions[    num_patches + ptr] = x + dx;
                                positions[2 * num_patches + ptr] = y + dy;
                                positions[3 * num_patches + ptr] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_QWEN25VL:
            {
                // pw * ph   = number of tokens output by ViT after applying the patch merger
                // ipw * iph = number of vision tokens processed inside ViT
                const int merge_ratio = 2;
                const int pw  = image_size_width  / patch_size / merge_ratio;
                const int ph  = image_size_height / patch_size / merge_ratio;
                const int ipw = image_size_width  / patch_size;
                const int iph = image_size_height / patch_size;

                std::vector<int> idx    (ph * pw);
                std::vector<int> inv_idx(ph * pw);

                if (use_window_attn) {
                    const int attn_window_size = 112;
                    const int grid_window = attn_window_size / patch_size / merge_ratio;
                    int dst = 0;
                    // [num_vision_tokens, num_vision_tokens] attention mask tensor
                    std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
                    int mask_row = 0;

                    for (int y = 0; y < ph; y += grid_window) {
                        for (int x = 0; x < pw; x += grid_window) {
                            const int win_h = std::min(grid_window, ph - y);
                            const int win_w = std::min(grid_window, pw - x);
                            const int dst_0 = dst;
                            // group all tokens belonging to the same window together (into a contiguous range)
                            for (int dy = 0; dy < win_h; dy++) {
                                for (int dx = 0; dx < win_w; dx++) {
                                    const int src = (y + dy) * pw + (x + dx);
                                    GGML_ASSERT(src < (int)idx.size());
                                    GGML_ASSERT(dst < (int)inv_idx.size());
                                    idx    [src] = dst;
                                    inv_idx[dst] = src;
                                    dst++;
                                }
                            }

                            for (int r = 0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
                                int row_offset = mask_row * (ipw * iph);
                                std::fill(
                                    mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
                                    mask.begin() + row_offset + (dst   * merge_ratio * merge_ratio),
                                    0.0);
                                mask_row++;
                            }
                        }
                    }

                    set_input_i32("window_idx",     idx);
                    set_input_i32("inv_window_idx", inv_idx);
                    set_input_f32("window_mask",    mask);
                } else {
                    for (int i = 0; i < ph * pw; i++) {
                        idx[i] = i;
                    }
                }

                const int mpow = merge_ratio * merge_ratio;
                std::vector<int> positions(n_pos * 4);

                int ptr = 0;
                for (int y = 0; y < iph; y += merge_ratio) {
                    for (int x = 0; x < ipw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                auto remap = idx[ptr / mpow];
                                remap = (remap * mpow) + (ptr % mpow);

                                positions[                  remap] = y + dy;
                                positions[    num_patches + remap] = x + dx;
                                positions[2 * num_patches + remap] = y + dy;
                                positions[3 * num_patches + remap] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }

                set_input_i32("positions", positions);
            } break;
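
        // e.g. attn_window_size = 112 with patch_size = 14 and merge_ratio = 2
        // gives grid_window = 4: window attention groups merged tokens into
        // 4 x 4 blocks (8 x 8 raw patches), and window_mask only allows
        // attention within each block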
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_KIMIVL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i / n_patches_per_col;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i % n_patches_per_col;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);

                // The patches vector is used to get rows to index into the embeds with;
                // we should skip dim 0 only if we have CLS to avoid going out of bounds
                // when retrieving the rows.
                int patch_offset = model.class_embedding ? 1 : 0;
                std::vector<int32_t> patches(num_patches);
                for (int i = 0; i < num_patches; i++) {
                    patches[i] = i + patch_offset;
                }
                set_input_i32("patches", patches);
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_QWEN2A:
        case PROJECTOR_TYPE_GLMA:
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_JANUS_PRO:
        case PROJECTOR_TYPE_COGVLM:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(num_patches + 1, 0); // +1 for the [CLS] token
                // last pos is always kept 0, it's for CLS
                // dimension H
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i / n_patches_per_col) + 1;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i % n_patches_per_col) + 1;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        case PROJECTOR_TYPE_LFM2A:
            {
                GGML_ASSERT(imgs.entries.size() == 1);
                const auto n_frames = clip_n_output_tokens(ctx, imgs.entries.front().get());
                auto d_model = 512;
                auto seq_len = n_frames * 2 - 1;
                std::vector<float> pos_emb(d_model * seq_len);
                std::vector<double> inv_freq(d_model / 2);
                for (size_t i = 0; i < inv_freq.size(); ++i) {
                    inv_freq[i] = std::exp(-(std::log(10000.0) / (float)d_model) * (2.0f * (float)(i)));
                }
                for (int64_t pos = 0; pos < seq_len; ++pos) {
                    for (size_t i = 0; i < inv_freq.size(); ++i) {
                        const float ang = (n_frames - pos - 1) * inv_freq[i];
                        pos_emb[pos*d_model + 2*i + 0] = sinf(ang); // even
                        pos_emb[pos*d_model + 2*i + 1] = cosf(ang); // odd
                    }
                }
                set_input_f32("pos_emb", pos_emb);
            } break;
        default:
            GGML_ABORT("Unknown projector type");
    }
    // ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
    ggml_backend_dev_t dev = ggml_backend_get_device(ctx->backend_cpu);
    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
    if (reg) {
        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (ggml_backend_set_n_threads_fn) {
            ggml_backend_set_n_threads_fn(ctx->backend_cpu, n_threads);
        }
    }

    auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
        return false;
    }

    // print debug nodes
    if (ctx->debug_graph) {
        LOG_INF("\n\n---\n\n");
        LOG_INF("\n\nDebug graph:\n\n");
        for (ggml_tensor * t : ctx->debug_print_tensors) {
            std::vector<uint8_t> data(ggml_nbytes(t));
            ggml_backend_tensor_get(t, data.data(), 0, ggml_nbytes(t));
            print_tensor_shape(t);
            print_tensor_data(t, data.data(), 3);
        }
    }

    // the last node is the embedding tensor
    ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // sanity check (only support batch size of 1 for now)
    const int n_tokens_out          = embeddings->ne[1];
    const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
    if (n_tokens_out != expected_n_tokens_out) {
        LOG_ERR("%s: expected output %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
        GGML_ABORT("Invalid number of output tokens");
    }

    // copy the embeddings to the location passed by the user
    if (vec != nullptr) {
        ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));
    }

    return true;
}
int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_LDP:
            return ctx->model.mm_model_block_1_block_2_1_b->ne[0];
        case PROJECTOR_TYPE_LDPV2:
            return ctx->model.mm_model_peg_0_b->ne[0];
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_MLP_NORM:
            return ctx->model.mm_3_b->ne[0];
        case PROJECTOR_TYPE_MINICPMV:
            return ctx->model.mm_model_proj->ne[0];
        case PROJECTOR_TYPE_GLM_EDGE:
            return ctx->model.mm_model_mlp_3_w->ne[1];
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_JANUS_PRO:
            return ctx->model.mm_1_b->ne[0];
        case PROJECTOR_TYPE_QWEN3VL:
            // main path + deepstack paths
            return ctx->model.mm_1_b->ne[0] * (1 + ctx->model.n_deepstack_layers);
        case PROJECTOR_TYPE_GEMMA3:
            return ctx->model.mm_input_proj_w->ne[0];
        case PROJECTOR_TYPE_IDEFICS3:
            return ctx->model.projection->ne[1];
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_VOXTRAL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_INTERNVL:
            return ctx->model.mm_3_w->ne[1];
        case PROJECTOR_TYPE_LLAMA4:
            return ctx->model.mm_model_proj->ne[1];
        case PROJECTOR_TYPE_QWEN2A:
            return ctx->model.mm_fc_w->ne[1];
        case PROJECTOR_TYPE_GLMA:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_COGVLM:
            return ctx->model.mm_4h_to_h_w->ne[1];
        case PROJECTOR_TYPE_LFM2A:
            return ctx->model.position_embeddings->ne[0];
        case PROJECTOR_TYPE_GLM4V:
            return ctx->model.mm_ffn_down_w->ne[1];
        default:
            GGML_ABORT("Unknown projector type");
    }
}
int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) {
        return ctx->model.hparams.minicpmv_version;
    }
    return 0;
}

bool clip_is_glm(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE;
}

bool clip_is_mrope(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN3VL
        || ctx->proj_type() == PROJECTOR_TYPE_GLM4V;
}

bool clip_is_llava(const struct clip_ctx * ctx) {
    return ctx->model.hparams.has_llava_projector;
}

bool clip_is_gemma3(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GEMMA3;
}

bool clip_has_vision_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_VISION;
}

bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_AUDIO;
}

bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A
        || ctx->proj_type() == PROJECTOR_TYPE_GLMA
        || ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL;
}
bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
    clip_image_f32 clip_img;
    clip_img.buf.resize(h * w * 3);
    for (int i = 0; i < h * w * 3; i++) {
        clip_img.buf[i] = img[i];
    }
    clip_img.nx = w;
    clip_img.ny = h;
    clip_image_encode(ctx, n_threads, &clip_img, vec);
    return true;
}
//
// API used internally with mtmd
//

projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
    return ctx->proj_type();
}

void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
    clip_image_f32 * audio = new clip_image_f32;
    audio->nx = n_frames;
    audio->ny = n_mel;
    audio->buf.resize(n_frames * n_mel);
    std::memcpy(audio->buf.data(), mel, n_frames * n_mel * sizeof(float));

    batch->entries.push_back(clip_image_f32_ptr(audio));
    batch->is_audio = true;
}

const clip_hparams * clip_get_hparams(const struct clip_ctx * ctx) {
    return &ctx->model.hparams;
}
//
// API for debugging
//

void clip_debug_encode(clip_ctx * ctx, int h, int w, float fill_value) {
    clip_image_f32 img;
    img.nx = w;
    img.ny = h;
    img.buf.resize(h * w * 3);
    for (int i = 0; i < h * w * 3; i++) {
        img.buf[i] = static_cast<float>(fill_value);
    }
    bool cur_debug_graph = ctx->debug_graph;
    ctx->debug_graph = true;
    clip_image_encode(ctx, 1, &img, nullptr);
    ctx->debug_graph = cur_debug_graph;
    GGML_ASSERT(img.buf.empty() && "expected, always stop here");
}