// clip.cpp
// NOTE: This is modified from clip.cpp only for LLaVA,
// so there might still be unnecessary artifacts hanging around
// I'll gradually clean and extend it
// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch
#include "clip.h"
#include "clip-impl.h"
#include "ggml.h"
#include "ggml-cpp.h"
#include "ggml-cpu.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "gguf.h"

#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <regex>
#include <stdexcept>
#include <unordered_set>
#include <vector>
#include <sstream>
#include <cinttypes>
#include <limits>
#include <array>
#include <numeric>
#include <functional>

struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};

enum ffn_op_type {
    FFN_GELU,
    FFN_SILU,
    FFN_GELU_QUICK,
};

enum norm_type {
    NORM_TYPE_NORMAL,
    NORM_TYPE_RMS,
};
//#define CLIP_DEBUG_FUNCTIONS

#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    // Write pixel data
    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
    }

    file.close();
}

static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;
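    // BMP rows are padded to a 4-byte boundary; e.g. a 3-pixel-wide image has
    // widthInBytes = 9, paddingAmount = 3, stride = 12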
    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',    // Signature
        0,0,0,0,    // Image file size in bytes
        0,0,0,0,    // Reserved
        54,0,0,0    // Start of pixel array
    };

    // Total file size
    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0,   // Size of this header (40 bytes)
        0,0,0,0,    // Image width
        0,0,0,0,    // Image height
        1,0,        // Number of color planes
        24,0,       // Bits per pixel
        0,0,0,0,    // No compression
        0,0,0,0,    // Image size (can be 0 for no compression)
        0,0,0,0,    // X pixels per meter (not specified)
        0,0,0,0,    // Y pixels per meter (not specified)
        0,0,0,0,    // Total colors (color table not used)
        0,0,0,0     // Important colors (all are important)
    };

    // Width and height in the information header
    infoHeader[4]  = (unsigned char)(img.nx);
    infoHeader[5]  = (unsigned char)(img.nx >> 8);
    infoHeader[6]  = (unsigned char)(img.nx >> 16);
    infoHeader[7]  = (unsigned char)(img.nx >> 24);
    infoHeader[8]  = (unsigned char)(img.ny);
    infoHeader[9]  = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));

    // Pixel data
    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            // Each pixel
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
                img.buf[pixelIndex]
            };
            file.write(reinterpret_cast<char*>(pixel), 3);
        }
        // Write padding for the row
        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
    }

    file.close();
}

// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(3 * src.nx * src.ny);
    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
    }
}
#endif
//
// clip layers
//

enum patch_merge_type {
    PATCH_MERGE_FLAT,
    PATCH_MERGE_SPATIAL_UNPAD,
};

struct clip_hparams {
    int32_t image_size;
    int32_t patch_size;
    int32_t n_embd;
    int32_t n_ff;
    int32_t projection_dim;
    int32_t n_head;
    int32_t n_layer;
    int32_t proj_scale_factor = 0; // idefics3
    // for models using dynamic image size, we need a smaller image size for warmup;
    // otherwise, the user will get an OOM every time they load the model
    int32_t warmup_image_size = 0;
    ffn_op_type ffn_op = FFN_GELU;

    patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;

    float eps = 1e-6;
    float rope_theta = 0.0;

    std::vector<int32_t> image_grid_pinpoints;
    int32_t image_crop_resolution;
    std::unordered_set<int32_t> vision_feature_layer;
    int32_t attn_window_size = 0;
    int32_t n_wa_pattern = 0;
    int32_t spatial_merge_size = 0;
};
struct clip_layer {
    // attention
    ggml_tensor * k_w = nullptr;
    ggml_tensor * k_b = nullptr;
    ggml_tensor * q_w = nullptr;
    ggml_tensor * q_b = nullptr;
    ggml_tensor * v_w = nullptr;
    ggml_tensor * v_b = nullptr;

    ggml_tensor * o_w = nullptr;
    ggml_tensor * o_b = nullptr;

    ggml_tensor * k_norm = nullptr;
    ggml_tensor * q_norm = nullptr;

    // layernorm 1
    ggml_tensor * ln_1_w = nullptr;
    ggml_tensor * ln_1_b = nullptr;

    ggml_tensor * ff_up_w = nullptr;
    ggml_tensor * ff_up_b = nullptr;
    ggml_tensor * ff_gate_w = nullptr;
    ggml_tensor * ff_gate_b = nullptr;
    ggml_tensor * ff_down_w = nullptr;
    ggml_tensor * ff_down_b = nullptr;

    // layernorm 2
    ggml_tensor * ln_2_w = nullptr;
    ggml_tensor * ln_2_b = nullptr;

    // layer scale (no bias)
    ggml_tensor * ls_1_w = nullptr;
    ggml_tensor * ls_2_w = nullptr;
};
struct clip_vision_model {
    struct clip_hparams hparams;

    // embeddings
    ggml_tensor * class_embedding = nullptr;
    ggml_tensor * patch_embeddings_0 = nullptr;
    ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along the temporal dimension (Qwen2VL)
    ggml_tensor * patch_bias = nullptr;
    ggml_tensor * position_embeddings = nullptr;

    ggml_tensor * pre_ln_w = nullptr;
    ggml_tensor * pre_ln_b = nullptr;

    std::vector<clip_layer> layers;

    ggml_tensor * post_ln_w;
    ggml_tensor * post_ln_b;

    ggml_tensor * projection;

    // LLaVA projection
    ggml_tensor * mm_input_norm_w = nullptr;
    ggml_tensor * mm_0_w = nullptr;
    ggml_tensor * mm_0_b = nullptr;
    ggml_tensor * mm_2_w = nullptr;
    ggml_tensor * mm_2_b = nullptr;

    ggml_tensor * image_newline = nullptr;

    // Yi type models with mlp+normalization projection
    ggml_tensor * mm_1_w = nullptr; // Yi type models use layer indices 0, 1, 3, 4
    ggml_tensor * mm_1_b = nullptr;
    ggml_tensor * mm_3_w = nullptr;
    ggml_tensor * mm_3_b = nullptr;
    ggml_tensor * mm_4_w = nullptr;
    ggml_tensor * mm_4_b = nullptr;

    // GLMV-Edge projection
    ggml_tensor * mm_model_adapter_conv_w = nullptr;
    ggml_tensor * mm_model_adapter_conv_b = nullptr;
    ggml_tensor * mm_glm_tok_boi = nullptr;
    ggml_tensor * mm_glm_tok_eoi = nullptr;

    // MobileVLM projection
    ggml_tensor * mm_model_mlp_1_w = nullptr;
    ggml_tensor * mm_model_mlp_1_b = nullptr;
    ggml_tensor * mm_model_mlp_3_w = nullptr;
    ggml_tensor * mm_model_mlp_3_b = nullptr;
    ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;

    // MobileVLM_V2 projection
    ggml_tensor * mm_model_mlp_0_w = nullptr;
    ggml_tensor * mm_model_mlp_0_b = nullptr;
    ggml_tensor * mm_model_mlp_2_w = nullptr;
    ggml_tensor * mm_model_mlp_2_b = nullptr;
    ggml_tensor * mm_model_peg_0_w = nullptr;
    ggml_tensor * mm_model_peg_0_b = nullptr;

    // MINICPMV projection
    ggml_tensor * mm_model_pos_embed_k = nullptr;
    ggml_tensor * mm_model_query = nullptr;
    ggml_tensor * mm_model_proj = nullptr;
    ggml_tensor * mm_model_kv_proj = nullptr;
    ggml_tensor * mm_model_attn_q_w = nullptr;
    ggml_tensor * mm_model_attn_q_b = nullptr;
    ggml_tensor * mm_model_attn_k_w = nullptr;
    ggml_tensor * mm_model_attn_k_b = nullptr;
    ggml_tensor * mm_model_attn_v_w = nullptr;
    ggml_tensor * mm_model_attn_v_b = nullptr;
    ggml_tensor * mm_model_attn_o_w = nullptr;
    ggml_tensor * mm_model_attn_o_b = nullptr;
    ggml_tensor * mm_model_ln_q_w = nullptr;
    ggml_tensor * mm_model_ln_q_b = nullptr;
    ggml_tensor * mm_model_ln_kv_w = nullptr;
    ggml_tensor * mm_model_ln_kv_b = nullptr;
    ggml_tensor * mm_model_ln_post_w = nullptr;
    ggml_tensor * mm_model_ln_post_b = nullptr;

    // gemma3
    ggml_tensor * mm_input_proj_w = nullptr;
    ggml_tensor * mm_soft_emb_norm_w = nullptr;

    // pixtral
    ggml_tensor * token_embd_img_break = nullptr;
    ggml_tensor * mm_patch_merger_w = nullptr;
};
struct clip_ctx {
    bool has_llava_projector = false;
    int minicpmv_version = 0;

    struct clip_vision_model vision_model;
    projector_type proj_type = PROJECTOR_TYPE_MLP;

    float image_mean[3];
    float image_std[3];

    gguf_context_ptr ctx_gguf;
    ggml_context_ptr ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    std::vector<ggml_backend_t> backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    ggml_backend_t backend;
    ggml_backend_t backend_cpu;
    ggml_backend_buffer_ptr buf;

    int max_nodes = 8192;
    ggml_backend_sched_ptr sched;

    clip_image_size load_image_size;

    clip_ctx(clip_context_params & ctx_params) {
        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (!backend_cpu) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        backend = ctx_params.use_gpu
                    ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr)
                    : nullptr;

        if (backend) {
            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
            backend_ptrs.push_back(backend);
            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
        } else {
            backend = backend_cpu;
            LOG_INF("%s: CLIP using CPU backend\n", __func__);
        }

        backend_ptrs.push_back(backend_cpu);
        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));

        sched.reset(
            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
        );
    }

    ~clip_ctx() {
        ggml_backend_free(backend);
        if (backend != backend_cpu) {
            ggml_backend_free(backend_cpu);
        }
    }
};
struct clip_graph {
    clip_ctx * ctx;
    const clip_vision_model & model;
    const clip_hparams & hparams;

    // we only support a single image per batch
    const clip_image_f32 & img;

    const int patch_size;
    const int n_patches_x;
    const int n_patches_y;
    const int n_patches;
    const int n_embd;
    const int n_head;
    const int d_head;
    const int n_layer;
    const float eps;
    const float kq_scale;

    ggml_context_ptr ctx0_ptr;
    ggml_context * ctx0;
    ggml_cgraph * gf;

    clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
            ctx(ctx),
            model(ctx->vision_model),
            hparams(model.hparams),
            img(img),
            patch_size(hparams.patch_size),
            n_patches_x(img.nx / patch_size),
            n_patches_y(img.ny / patch_size),
            n_patches(n_patches_x * n_patches_y),
            n_embd(hparams.n_embd),
            n_head(hparams.n_head),
            d_head(n_embd / n_head),
            n_layer(hparams.n_layer),
            eps(hparams.eps),
            kq_scale(1.0f / sqrtf((float)d_head)) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx->buf_compute_meta.size(),
            /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
            /*.no_alloc   =*/ true,
        };
        ctx0_ptr.reset(ggml_init(params));
        ctx0 = ctx0_ptr.get();
        gf = ggml_new_graph(ctx0);
    }
    ggml_cgraph * build_siglip() {
        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                                inp, n_patches,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                model.position_embeddings,
                                nullptr);

        if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
            const int batch_size = 1;
            GGML_ASSERT(n_patches_x == n_patches_y);
            const int patches_per_image = n_patches_x;
            const int kernel_size = hparams.proj_scale_factor;

            cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
            cur = ggml_reshape_4d(ctx0, cur, patches_per_image, patches_per_image, n_embd, batch_size);

            // doing a pool2d to reduce the number of output tokens
            cur = ggml_pool_2d(ctx0, cur, GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0);
            cur = ggml_reshape_3d(ctx0, cur, cur->ne[0] * cur->ne[0], n_embd, batch_size);
            cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
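            // e.g. (illustrative numbers, not asserted by the code): a 64x64 patch
            // grid with kernel_size = 4 is averaged down to a 16x16 grid, i.e.
            // 4096 -> 256 output tokens, each still of width n_embd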
            // apply norm before projection
            cur = ggml_rms_norm(ctx0, cur, eps);
            cur = ggml_mul(ctx0, cur, model.mm_soft_emb_norm_w);

            // apply projection
            cur = ggml_mul_mat(ctx0,
                ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)),
                cur);

        } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3) {
            // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578
            const int scale_factor = model.hparams.proj_scale_factor;
            const int n_embd = cur->ne[0];
            const int seq    = cur->ne[1];
            const int bsz    = 1; // batch size, always 1 for now since we don't support batching
            const int height = std::sqrt(seq);
            const int width  = std::sqrt(seq);
            GGML_ASSERT(scale_factor != 0);
            cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height, bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                height / scale_factor,
                width / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_3d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                seq / (scale_factor * scale_factor),
                bsz);
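            // pixel shuffle trades spatial resolution for channel depth; with
            // illustrative numbers n_embd = 1152, seq = 1024 (a 32x32 grid) and
            // scale_factor = 2, the result is [1152*4, 1024/4] = [4608, 256]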
            cur = ggml_mul_mat(ctx0, model.projection, cur);
        } else {
            GGML_ABORT("SigLIP: Unsupported projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    ggml_cgraph * build_pixtral() {
        const int n_merge = hparams.spatial_merge_size;

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            return build_rope_2d(ctx0, cur, pos_h, pos_w, hparams.rope_theta);
        };
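        // build_rope_2d applies rotary position embeddings over (row, column)
        // coordinates instead of a flat token index: roughly, one half of each
        // head's dimensions is rotated according to pos_h and the other half
        // according to pos_w, with frequencies derived from rope_theta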
        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                                inp, n_patches,
                                NORM_TYPE_RMS,
                                hparams.ffn_op,
                                nullptr, // no learned pos embd
                                add_pos);

        // mistral small 3.1 patch merger
        // ref: https://github.com/huggingface/transformers/blob/7a3e208892c06a5e278144eaf38c8599a42f53e7/src/transformers/models/mistral3/modeling_mistral3.py#L67
        if (model.mm_patch_merger_w) {
            GGML_ASSERT(hparams.spatial_merge_size > 0);

            cur = ggml_mul(ctx0, ggml_rms_norm(ctx0, cur, eps), model.mm_input_norm_w);

            // reshape image tokens to 2D grid
            cur = ggml_reshape_3d(ctx0, cur, n_embd, n_patches_x, n_patches_y);
            cur = ggml_permute(ctx0, cur, 2, 0, 1, 3); // [x, y, n_embd]
            cur = ggml_cont(ctx0, cur);

            // torch.nn.functional.unfold is just an im2col under the hood
            // we just need a dummy kernel to make it work
            ggml_tensor * kernel = ggml_view_3d(ctx0, cur, n_merge, n_merge, cur->ne[2], 0, 0, 0);
            cur = ggml_im2col(ctx0, kernel, cur, n_merge, n_merge, 0, 0, 1, 1, true, inp->type);

            // project to n_embd
            cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
            cur = ggml_mul_mat(ctx0, model.mm_patch_merger_w, cur);
        }

        // LlavaMultiModalProjector (always using GELU activation)
        {
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            if (model.mm_1_b) {
                cur = ggml_add(ctx0, cur, model.mm_1_b);
            }

            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            if (model.mm_2_b) {
                cur = ggml_add(ctx0, cur, model.mm_2_b);
            }
        }

        // arrangement of the [IMG_BREAK] token
        {
            // not efficient, but works
            // the trick is to view the embeddings as a 3D tensor with shape [n_embd, n_patches_per_row, n_rows]
            // and then concatenate the [IMG_BREAK] token to the end of each row, aka n_patches_per_row dimension
            // after the concatenation, we have a tensor with shape [n_embd, n_patches_per_row + 1, n_rows]

            const int p_y             = n_merge > 0 ? n_patches_y / n_merge : n_patches_y;
            const int p_x             = n_merge > 0 ? n_patches_x / n_merge : n_patches_x;
            const int p_total         = p_x * p_y;
            const int n_embd_text     = cur->ne[0];
            const int n_tokens_output = p_total + p_y - 1; // one [IMG_BREAK] per row, except the last row

            ggml_tensor * tmp = ggml_reshape_3d(ctx0, cur, n_embd_text, p_x, p_y);
            ggml_tensor * tok = ggml_new_tensor_3d(ctx0, tmp->type, n_embd_text, 1, p_y);
            tok = ggml_scale(ctx0, tok, 0.0); // clear the tensor
            tok = ggml_add(ctx0, tok, model.token_embd_img_break);
            tmp = ggml_concat(ctx0, tmp, tok, 1);
            cur = ggml_view_2d(ctx0, tmp,
                n_embd_text, n_tokens_output,
                ggml_row_size(tmp->type, n_embd_text), 0);
        }
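        // e.g. for a 2x2 grid with rows [a1 a2] and [b1 b2], the concat yields
        // a1 a2 BREAK b1 b2 BREAK, and the 2D view keeps only the first
        // p_total + p_y - 1 = 5 tokens, dropping the trailing BREAK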
        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }

    // Qwen2VL and Qwen2.5VL use M-RoPE
    ggml_cgraph * build_qwen2vl() {
        GGML_ASSERT(model.patch_bias == nullptr);
        GGML_ASSERT(model.class_embedding == nullptr);

        const int batch_size       = 1;
        const bool use_window_attn = hparams.n_wa_pattern > 0;
        const int n_wa_pattern     = hparams.n_wa_pattern;
        const int n_pos            = n_patches;
        const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position

        norm_type norm_t = ctx->proj_type == PROJECTOR_TYPE_QWEN25VL
            ? NORM_TYPE_RMS     // qwen 2.5 vl
            : NORM_TYPE_NORMAL; // qwen 2 vl

        int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
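        // M-RoPE splits each head's rotary dimensions evenly across the four
        // position channels; e.g. with d_head = 80, each section rotates 20 dims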
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

        GGML_ASSERT(img.nx % (patch_size * 2) == 0);
        GGML_ASSERT(img.ny % (patch_size * 2) == 0);

        // second conv dimension
        {
            auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
            inp = ggml_add(ctx0, inp, inp_1);

            inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 2, 0, 3)); // [w, h, c, b] -> [c, w, h, b]
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
            inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 0, 2, 1, 3));
            inp = ggml_reshape_3d(
                ctx0, inp,
                n_embd, n_patches_x * n_patches_y, batch_size);
        }
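        // the reshape/permute shuffle above reorders the patch sequence so that
        // every 4 consecutive tokens come from the same 2x2 spatial block,
        // matching the 2x2 spatial merge applied later in the projector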
        ggml_tensor * inpL           = inp;
        ggml_tensor * window_mask    = nullptr;
        ggml_tensor * window_idx     = nullptr;
        ggml_tensor * inv_window_idx = nullptr;

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
        }

        if (use_window_attn) {
            // handle window attention inputs
            inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(inv_window_idx, "inv_window_idx");
            ggml_set_input(inv_window_idx);
            // mask for window attention
            window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_pos, n_pos);
            ggml_set_name(window_mask, "window_mask");
            ggml_set_input(window_mask);

            // inpL shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            inpL = ggml_reshape_2d(ctx0, inpL, n_embd * 4, n_patches_x * n_patches_y * batch_size / 4);
            inpL = ggml_get_rows(ctx0, inpL, inv_window_idx);
            inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_patches_x * n_patches_y, batch_size);
        }
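        // inv_window_idx permutes the 2x2-merged token groups into window-local
        // order, so that window_mask (which blocks attention across window
        // boundaries) can be applied on the layers that use windowed attention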
        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            const bool full_attn = use_window_attn ? (il + 1) % n_wa_pattern == 0 : true;

            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "ln1", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.q_w, cur), layer.q_b);
                ggml_tensor * Kcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.k_w, cur), layer.k_b);
                ggml_tensor * Vcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.v_w, cur), layer.v_b);

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_patches);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_patches);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_patches);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                // apply M-RoPE
                Qcur = ggml_rope_multi(
                    ctx0, Qcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
                Kcur = ggml_rope_multi(
                    ctx0, Kcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);

                cb(Qcur, "Qcur_rope", il);
                cb(Kcur, "Kcur_rope", il);

                ggml_tensor * attn_mask = full_attn ? nullptr : window_mask;

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, attn_mask, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            // re-add the layer input, i.e. the residual
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states

            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);

            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
        }

        // multimodal projection
        ggml_tensor * embeddings = inpL;
        embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);

        embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

        // GELU activation
        embeddings = ggml_gelu(ctx0, embeddings);

        // Second linear layer
        embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);

        if (use_window_attn) {
            window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(window_idx, "window_idx");
            ggml_set_input(window_idx);

            // embeddings shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4);
            embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
            embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4, batch_size);
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_minicpmv() {
        const int batch_size = 1;

        GGML_ASSERT(model.class_embedding == nullptr);
        const int n_pos = n_patches;

        // position embeddings for the projector (not for ViT)
        int n_output_dim = clip_n_mmproj_embd(ctx);
        ggml_tensor * pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_output_dim, n_pos, batch_size);
        ggml_set_name(pos_embed, "pos_embed");
        ggml_set_input(pos_embed);

        // for selecting learned pos embd, used by ViT
        struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        ggml_tensor * learned_pos_embd = ggml_get_rows(ctx0, model.position_embeddings, positions);

        ggml_tensor * inp = build_inp();
        ggml_tensor * embeddings = build_vit(
                                inp, n_patches,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                learned_pos_embd,
                                nullptr);

        // resampler projector (it is just another transformer)

        ggml_tensor * q = model.mm_model_query;
        ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);

        // norm
        q = build_norm(q, model.mm_model_ln_q_w, model.mm_model_ln_q_b, NORM_TYPE_NORMAL, eps, -1);
        v = build_norm(v, model.mm_model_ln_kv_w, model.mm_model_ln_kv_b, NORM_TYPE_NORMAL, eps, -1);

        // k = v + pos_embed
        ggml_tensor * k = ggml_add(ctx0, v, pos_embed);

        // attention
        {
            int n_embd = clip_n_mmproj_embd(ctx);
            const int d_head = 128;
            int n_head = n_embd/d_head;
            int num_query = 96;
            if (ctx->minicpmv_version == 2) {
                num_query = 96;
            } else if (ctx->minicpmv_version == 3) {
                num_query = 64;
            } else if (ctx->minicpmv_version == 4) {
                num_query = 64;
            }
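            // the resampler is a single cross-attention block: num_query learned
            // query vectors attend over all n_pos image tokens, compressing the
            // image to a fixed number of output embeddings regardless of its size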
            ggml_tensor * Q = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q),
                model.mm_model_attn_q_b);
            ggml_tensor * K = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k),
                model.mm_model_attn_k_b);
            ggml_tensor * V = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v),
                model.mm_model_attn_v_b);

            Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_query);
            K = ggml_reshape_3d(ctx0, K, d_head, n_head, n_pos);
            V = ggml_reshape_3d(ctx0, V, d_head, n_head, n_pos);

            cb(Q, "resampler_Q", -1);
            cb(K, "resampler_K", -1);
            cb(V, "resampler_V", -1);

            embeddings = build_attn(
                model.mm_model_attn_o_w,
                model.mm_model_attn_o_b,
                Q, K, V, nullptr, kq_scale, -1);
            cb(embeddings, "resampler_attn_out", -1);
        }

        // layernorm
        embeddings = build_norm(embeddings, model.mm_model_ln_post_w, model.mm_model_ln_post_b, NORM_TYPE_NORMAL, eps, -1);

        // projection
        embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_internvl() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1;
        ggml_tensor * inp = build_inp();

        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // The larger models use a different ViT, which uses RMS norm instead of layer norm
        // ref: https://github.com/ggml-org/llama.cpp/pull/13443#issuecomment-2869786188
        norm_type norm_t = (hparams.n_embd == 3200 && hparams.n_layer == 45)
            ? NORM_TYPE_RMS     // 6B ViT (used by InternVL 2.5/3 - 26B, 38B, 78B)
            : NORM_TYPE_NORMAL; // 300M ViT (used by all smaller InternVL models)

        ggml_tensor * cur = build_vit(
                                inp, n_pos,
                                norm_t,
                                hparams.ffn_op,
                                model.position_embeddings,
                                nullptr);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
        {
            const int scale_factor = model.hparams.proj_scale_factor;
            const int bsz    = 1; // batch size, always 1 for now since we don't support batching
            const int height = n_patches_y;
            const int width  = n_patches_x;
            GGML_ASSERT(scale_factor > 0);
            cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, height / scale_factor, width, bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                height / scale_factor,
                width / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

            // flatten to 2D
            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                cur->ne[1] * cur->ne[2]);
        }
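        // same pixel-shuffle trick as in the idefics3 branch above: each
        // scale_factor x scale_factor block of patches is folded into the
        // channel dimension, cutting the token count by scale_factor^2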
        // projector (always using GELU activation)
        {
            // projector LayerNorm uses pytorch's default eps = 1e-5
            // ref: https://huggingface.co/OpenGVLab/InternVL3-8B-Instruct/blob/a34d3e4e129a5856abfd6aa6de79776484caa14e/modeling_internvl_chat.py#L79
            cur = build_norm(cur, model.mm_0_w, model.mm_0_b, NORM_TYPE_NORMAL, 1e-5, -1);
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_1_b);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_3_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_3_b);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    // this graph is used by llava, granite and glm
    // due to having embedding_stack (used by granite), we cannot reuse build_vit
    ggml_cgraph * build_llava() {
        const int batch_size = 1;

        const int n_pos = n_patches + (model.class_embedding ? 1 : 0);

        GGML_ASSERT(n_patches_x == n_patches_y && "only square images supported");

        // Calculate the deepest feature layer based on hparams and projector type
        int max_feature_layer = n_layer;
        {
            // Get the index of the second to last layer; this is the default for models that have a llava projector
            int il_last = hparams.n_layer - 1;
            int deepest_feature_layer = -1;

            if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
                il_last += 1;
            }

            // If we set explicit vision feature layers, only go up to the deepest one
            // NOTE: only used by granite-vision models for now
            for (const auto & feature_layer : hparams.vision_feature_layer) {
                if (feature_layer > deepest_feature_layer) {
                    deepest_feature_layer = feature_layer;
                }
            }
            max_feature_layer = deepest_feature_layer < 0 ? il_last : deepest_feature_layer;
        }
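        // e.g. for a 24-layer ViT with no explicit feature layers,
        // max_feature_layer = 23, so the loop below runs layers 0..22 and the
        // final transformer block is skipped (classic LLaVA penultimate-layer features)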
        ggml_tensor * inp = build_inp();

        // concat class_embeddings and patch_embeddings
        if (model.class_embedding) {
            inp = ggml_concat(ctx0, inp, model.class_embedding, 1);
        }

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        inp = ggml_add(ctx0, inp, ggml_get_rows(ctx0, model.position_embeddings, positions));

        ggml_tensor * inpL = inp;

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, NORM_TYPE_NORMAL, eps, -1);
            cb(inpL, "pre_ln", -1);
        }

        std::vector<ggml_tensor *> embedding_stack;
        const auto & vision_feature_layer = hparams.vision_feature_layer;

        // loop over layers
        for (int il = 0; il < max_feature_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // If this is an embedding feature layer, save the output.
            // NOTE: 0 index here refers to the input to the encoder.
            if (vision_feature_layer.find(il) != vision_feature_layer.end()) {
                embedding_stack.push_back(cur);
            }

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "layer_inp_normed", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }

                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }

                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            // re-add the layer input, i.e. the residual
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states

            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);

            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, NORM_TYPE_NORMAL, eps, -1);
        }

        ggml_tensor * embeddings = inpL;

        // process vision feature layers (used by granite)
        {
            // final layer is a vision feature layer
            if (vision_feature_layer.find(max_feature_layer) != vision_feature_layer.end()) {
                embedding_stack.push_back(inpL);
            }

            // If feature layers are explicitly set, stack them (if we have multiple)
            if (!embedding_stack.empty()) {
                embeddings = embedding_stack[0];
                for (size_t i = 1; i < embedding_stack.size(); i++) {
                    embeddings = ggml_concat(ctx0, embeddings, embedding_stack[i], 0);
                }
            }
        }

        // llava projector (also used by granite)
        if (ctx->has_llava_projector) {
            embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);

            ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
            ggml_set_name(patches, "patches");
            ggml_set_input(patches);

            // shape [1, 576, 1024]
            // ne is whcn, ne = [1024, 576, 1, 1]
            embeddings = ggml_get_rows(ctx0, embeddings, patches);

            // print_tensor_info(embeddings, "embeddings");

            // llava projector
            if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

                embeddings = ggml_gelu(ctx0, embeddings);
                if (model.mm_2_w) {
                    embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
                    embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
                }
            }
            else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
                // ggml_tensor_printf(embeddings, "mm_0_w", 0, true, false);

                // First LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),
                    model.mm_1_b);

                // GELU activation
                embeddings = ggml_gelu(ctx0, embeddings);

                // Second linear layer
                embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);

                // Second LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
                    model.mm_4_b);
            }
  925. else if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
  926. // MobileVLM projector
  927. int n_patch = 24;
  928. ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
  929. mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
  930. mlp_1 = ggml_gelu(ctx0, mlp_1);
  931. ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
  932. mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
  933. // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]
  934. // block 1
  935. ggml_tensor * block_1 = nullptr;
  936. {
  937. // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
  938. mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
  939. mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
  940. // stride = 1, padding = 1, bias is nullptr
  941. block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);
  942. // layer norm
  943. // // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  944. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
  945. // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
  946. block_1 = ggml_norm(ctx0, block_1, eps);
  947. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
  948. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
  949. // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  950. // hardswish
  951. ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
  952. block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
  953. // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
  954. // pointwise conv
  955. block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
  956. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
  957. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
  958. block_1 = ggml_relu(ctx0, block_1);
  959. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
  960. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
  961. block_1 = ggml_hardsigmoid(ctx0, block_1);
  962. // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
  963. block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
  964. block_1 = ggml_mul(ctx0, block_1_hw, block_1);
  965. int w = block_1->ne[0], h = block_1->ne[1];
  966. block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
  967. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
  968. // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
  969. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
  970. block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
  971. // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
  972. block_1 = ggml_norm(ctx0, block_1, eps);
  973. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
  974. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
  975. // block1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  976. // residual
  977. block_1 = ggml_add(ctx0, mlp_3, block_1);
  978. }
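                // note: both block 1 and block 2 follow a MobileNetV3-style squeeze-excitation pattern:
                //   y     = hardswish(layernorm(conv_dw(x)))
                //   scale = hardsigmoid(fc2(relu(fc1(avgpool(y)))))
                //   out   = layernorm(conv_pw(y * scale))   (plus a residual add in block 1)
                // where hardswish(x) = x * clamp(x + 3, 0, 6) / 6 and hardsigmoid(x) = clamp(x + 3, 0, 6) / 6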
                // block_2
                {
                    // stride = 2
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]

                    // layer norm
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]

                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);

                    // not sure the parameters are right for global average pooling
                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]

                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);

                    // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
                    // block_1 shape = [1, 12*12, 2048], ne = [12*12, 2048, 1]
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
                    block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
                    // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
                }
                embeddings = block_1;
            }
            else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
                int n_patch = 24;
                ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
                mlp_0 = ggml_gelu(ctx0, mlp_0);
                ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
                mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
                // mlp_2 ne = [2048, 576, 1, 1]

                // AVG Pool Layer 2*2, strides = 2
                mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
                // mlp_2 ne = [576, 2048, 1, 1]
                mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
                // mlp_2 ne = [24, 24, 2048, 1]
                mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
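                // PEG-style position encoding: a 3x3 depthwise conv whose output is added
                // back onto its input (see below), injecting positional information implicitly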
                // weight ne = [3, 3, 2048, 1]
                ggml_tensor * peg_0 = ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
                peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
                mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, mlp_2);
                peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
                embeddings = peg_0;
            }
            else {
                GGML_ABORT("fatal error");
            }
        }
        // glm projector
        else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
            size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
            embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
            embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
            embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
            embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0]*embeddings->ne[1], embeddings->ne[2], batch_size);
            embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
            embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);

            // GLU
            {
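                // after the initial linear + layernorm + gelu below, this is a SwiGLU-style gate:
                //   out = mlp_3( silu(mlp_2(h)) * mlp_1(h) )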
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
                embeddings = ggml_gelu_inplace(ctx0, embeddings);
                ggml_tensor * x = embeddings;
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
                x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, x);
                embeddings = ggml_silu_inplace(ctx0, embeddings);
                embeddings = ggml_mul(ctx0, embeddings, x);
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
            }

            // arrangement of BOI/EOI token embeddings
            // note: these embeddings are not present in the text model, hence we cannot process them as text tokens
            // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
            {
                embeddings = ggml_concat(ctx0, model.mm_glm_tok_boi, embeddings, 1); // BOI
                embeddings = ggml_concat(ctx0, embeddings, model.mm_glm_tok_eoi, 1); // EOI
            }
        }
        else {
            GGML_ABORT("llava: unknown projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }

private:
    //
    // utility functions
    //

    void cb(ggml_tensor * cur, const char * name, int il) const {
        // TODO: implement this
        GGML_UNUSED(cur);
        GGML_UNUSED(name);
        GGML_UNUSED(il);
    }

    // build vision transformer (ViT) cgraph
    // this function should cover most of the models
    // if your model has specific features, you should probably duplicate this function
    ggml_tensor * build_vit(
            ggml_tensor * inp,
            int64_t n_pos,
            norm_type norm_t,
            ffn_op_type ffn_t,
            ggml_tensor * learned_pos_embd,
            std::function<ggml_tensor *(ggml_tensor *, const clip_layer &)> add_pos
        ) {
        if (learned_pos_embd) {
            inp = ggml_add(ctx0, inp, learned_pos_embd);
            cb(inp, "pos_embed", -1);
        }

        ggml_tensor * inpL = inp;

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
            cb(inpL, "pre_ln", -1);
        }

        // loop over layers
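        // each iteration applies one pre-norm transformer block:
        //   x = x + ls_1 * attn(norm_1(x))
        //   x = x + ls_2 * ffn(norm_2(x))
        // where ls_1/ls_2 are optional per-channel layer-scale weights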
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "layer_inp_normed", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }

                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }

                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                if (layer.q_norm) {
                    Qcur = build_norm(Qcur, layer.q_norm, NULL, norm_t, eps, il);
                    cb(Qcur, "Qcur_norm", il);
                }

                if (layer.k_norm) {
                    Kcur = build_norm(Kcur, layer.k_norm, NULL, norm_t, eps, il);
                    cb(Kcur, "Kcur_norm", il);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                if (add_pos) {
                    Qcur = add_pos(Qcur, layer);
                    Kcur = add_pos(Kcur, layer);
                    cb(Qcur, "Qcur_pos", il);
                    cb(Kcur, "Kcur_pos", il);
                }

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            if (layer.ls_1_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_1_w);
                cb(cur, "attn_out_scaled", il);
            }

            // re-add the layer input, i.e., the residual connection
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states

            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                ffn_t, il);

            cb(cur, "ffn_out", il);

            if (layer.ls_2_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_2_w);
                cb(cur, "ffn_out_scaled", il);
            }

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
        }
        return inpL;
    }

    // build the input after conv2d (inp_raw --> patches)
    // returns tensor with shape [n_embd, n_patches]
    ggml_tensor * build_inp() {
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
        inp = ggml_reshape_2d(ctx0, inp, n_patches, n_embd);
        inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
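        // the conv output has ne = [n_patches_x, n_patches_y, n_embd]; after the reshape
        // and transpose above, each column is one patch embedding: ne = [n_embd, n_patches]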
        if (model.patch_bias) {
            inp = ggml_add(ctx0, inp, model.patch_bias);
            cb(inp, "patch_bias", -1);
        }
        return inp;
    }

    ggml_tensor * build_inp_raw() {
        ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, 3);
        ggml_set_name(inp_raw, "inp_raw");
        ggml_set_input(inp_raw);
        return inp_raw;
    }

    ggml_tensor * build_norm(
            ggml_tensor * cur,
            ggml_tensor * mw,
            ggml_tensor * mb,
            norm_type type,
            float norm_eps,
            int il) const {
        cur = type == NORM_TYPE_RMS
            ? ggml_rms_norm(ctx0, cur, norm_eps)
            : ggml_norm(ctx0, cur, norm_eps);

        if (mw || mb) {
            cb(cur, "norm", il);
        }

        if (mw) {
            cur = ggml_mul(ctx0, cur, mw);
            if (mb) {
                cb(cur, "norm_w", il);
            }
        }

        if (mb) {
            cur = ggml_add(ctx0, cur, mb);
        }

        return cur;
    }

    ggml_tensor * build_ffn(
            ggml_tensor * cur,
            ggml_tensor * up,
            ggml_tensor * up_b,
            ggml_tensor * gate,
            ggml_tensor * gate_b,
            ggml_tensor * down,
            ggml_tensor * down_b,
            ffn_op_type type_op,
            int il) const {
        ggml_tensor * tmp = up ? ggml_mul_mat(ctx0, up, cur) : cur;
        cb(tmp, "ffn_up", il);

        if (up_b) {
            tmp = ggml_add(ctx0, tmp, up_b);
            cb(tmp, "ffn_up_b", il);
        }

        if (gate) {
            cur = ggml_mul_mat(ctx0, gate, cur);
            cb(cur, "ffn_gate", il);

            if (gate_b) {
                cur = ggml_add(ctx0, cur, gate_b);
                cb(cur, "ffn_gate_b", il);
            }
        } else {
            cur = tmp;
        }

        switch (type_op) {
            case FFN_SILU:
                {
                    cur = ggml_silu(ctx0, cur);
                    cb(cur, "ffn_silu", il);
                } break;
            case FFN_GELU:
                {
                    cur = ggml_gelu(ctx0, cur);
                    cb(cur, "ffn_gelu", il);
                } break;
            case FFN_GELU_QUICK:
                {
                    cur = ggml_gelu_quick(ctx0, cur);
                    cb(cur, "ffn_gelu_quick", il);
                } break;
        }

        // we only support parallel ffn for now
        if (gate) {
            cur = ggml_mul(ctx0, cur, tmp);
            cb(cur, "ffn_gate_par", il);
        }

        if (down) {
            cur = ggml_mul_mat(ctx0, down, cur);
        }

        if (down_b) {
            cb(cur, "ffn_down", il);
            cur = ggml_add(ctx0, cur, down_b);
        }

        return cur;
    }

    ggml_tensor * build_attn(
            ggml_tensor * wo,
            ggml_tensor * wo_b,
            ggml_tensor * q_cur,
            ggml_tensor * k_cur,
            ggml_tensor * v_cur,
            ggml_tensor * kq_mask,
            float kq_scale,
            int il) const {
        // these nodes are added to the graph together so that they are not reordered
        // by doing so, the number of splits in the graph is reduced
        ggml_build_forward_expand(gf, q_cur);
        ggml_build_forward_expand(gf, k_cur);
        ggml_build_forward_expand(gf, v_cur);

        ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
        //cb(q, "q", il);

        ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
        //cb(k, "k", il);

        ggml_tensor * v = ggml_permute(ctx0, v_cur, 1, 2, 0, 3);
        v = ggml_cont(ctx0, v);
        //cb(v, "v", il);
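        // shapes in ggml ne order: q, k = [d_head, n_pos, n_head], v = [n_pos, d_head, n_head]
        //   kq  = softmax(k^T q * scale) -> [n_pos, n_pos, n_head]
        //   kqv = v^T kq                 -> [d_head, n_pos, n_head]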
        ggml_tensor * cur;

        // TODO @ngxson : support flash attention
        {
            const auto n_tokens = q->ne[1];
            const auto n_head   = q->ne[2];
            // const auto n_kv  = k->ne[1]; // for flash attention

            ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);

            // F32 may not be needed for vision encoders?
            // ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

            kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, 0.0f);

            ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
            cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
            cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
        }

        cb(cur, "kqv_out", il);

        if (wo) {
            cur = ggml_mul_mat(ctx0, wo, cur);
        }

        if (wo_b) {
            cur = ggml_add(ctx0, cur, wo_b);
        }

        return cur;
    }

    // implementation of the 2D RoPE without adding a new op in ggml
    // this is not efficient (it uses double the memory), but it works on all backends
    // TODO: there was a more efficient implementation that relied on ggml_view and ggml_rope_ext_inplace,
    //       but the inplace rope does not work well with non-contiguous tensors; we should fix that and
    //       revert to the original implementation in https://github.com/ggml-org/llama.cpp/pull/13065
    static ggml_tensor * build_rope_2d(
        ggml_context * ctx0,
        ggml_tensor * cur,
        ggml_tensor * pos_h,
        ggml_tensor * pos_w,
        const float freq_base
    ) {
        const int64_t n_dim  = cur->ne[0];
        const int64_t n_head = cur->ne[1];
        const int64_t n_pos  = cur->ne[2];

        // for example, if we have cur tensor of shape (n_dim=8, n_head, n_pos)
        // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
        // first half of cur will use 1e-0, 1e-2 (even)
        // second half of cur will use 1e-1, 1e-3 (odd)
        // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
        //   ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
        // then for the second half, we use freq_scale to shift the inv_freq
        //   ^ why? replace (2i) with (2i+1) in the above equation
        const float freq_scale_odd = std::pow(freq_base, (float)-2/n_dim);
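        // e.g. n_dim = 8, freq_base = 1e4: rotating n_dim/2 = 4 dims with freq_base yields
        // inv_freq = {1e-0, 1e-2} for the first half; multiplying by
        // freq_scale_odd = 1e4^(-2/8) = 1e-1 shifts the second half to {1e-1, 1e-3}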
        // first half
        ggml_tensor * first;
        {
            first = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                0);
            first = ggml_rope_ext(
                ctx0,
                first,
                pos_h,      // positions
                nullptr,    // freq factors
                n_dim/2,    // n_dims
                0, 0, freq_base,
                1.0f, 0.0f, 1.0f, 0.0f, 0.0f
            );
        }

        // second half
        ggml_tensor * second;
        {
            second = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                n_dim/2 * ggml_element_size(cur));
            second = ggml_cont(ctx0, second); // copy, because ggml_rope doesn't play well with non-contiguous tensors
            second = ggml_rope_ext(
                ctx0,
                second,
                pos_w,      // positions
                nullptr,    // freq factors
                n_dim/2,    // n_dims
                0, 0, freq_base,
                freq_scale_odd,
                0.0f, 1.0f, 0.0f, 0.0f
            );
        }

        cur = ggml_concat(ctx0, first, second, 0);
        return cur;
    }
};

static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
    GGML_ASSERT(imgs.entries.size() == 1 && "n_batch > 1 is not supported");
    clip_graph graph(ctx, *imgs.entries[0]);

    ggml_cgraph * res;
    switch (ctx->proj_type) {
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
            {
                res = graph.build_siglip();
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
            {
                res = graph.build_pixtral();
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            {
                res = graph.build_qwen2vl();
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                res = graph.build_minicpmv();
            } break;
        case PROJECTOR_TYPE_INTERNVL:
            {
                res = graph.build_internvl();
            } break;
        default:
            {
                res = graph.build_llava();
            } break;
    }
    return res;
}
struct clip_model_loader {
    ggml_context_ptr ctx_meta;
    gguf_context_ptr ctx_gguf;

    clip_ctx & ctx_clip;
    std::string fname;

    size_t model_size = 0; // in bytes

    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_vision_model
    clip_model_loader(const char * fname, clip_ctx & ctx_clip) : ctx_clip(ctx_clip), fname(fname) {
        struct ggml_context * meta = nullptr;

        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &meta,
        };

        ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
        if (!ctx_gguf.get()) {
            throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
        }

        ctx_meta.reset(meta);

        const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());

        // print gguf info
        {
            std::string name;
            get_string(KEY_NAME, name, false);
            std::string description;
            get_string(KEY_DESCRIPTION, description, false);
            LOG_INF("%s: model name:   %s\n", __func__, name.c_str());
            LOG_INF("%s: description:  %s\n", __func__, description.c_str());
            LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx_gguf.get()));
            LOG_INF("%s: alignment:    %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
            LOG_INF("%s: n_tensors:    %d\n", __func__, n_tensors);
            LOG_INF("%s: n_kv:         %d\n", __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
            LOG_INF("\n");
        }

        // tensors
        {
            for (int i = 0; i < n_tensors; ++i) {
                const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
                const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
                enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
                ggml_tensor * cur = ggml_get_tensor(meta, name);
                size_t tensor_size = ggml_nbytes(cur);
                model_size += tensor_size;
                LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
                    __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }

    void load_hparams() {
        auto & hparams = ctx_clip.vision_model.hparams;
        std::string log_ffn_op; // for logging

        // projector type
        std::string proj_type;
        {
            get_string(KEY_PROJ_TYPE, proj_type, false);
            if (!proj_type.empty()) {
                ctx_clip.proj_type = clip_projector_type_from_string(proj_type);
            }
            if (ctx_clip.proj_type == PROJECTOR_TYPE_UNKNOWN) {
                throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
            }
        }

        // other hparams
        {
            get_i32(KEY_MINICPMV_VERSION, ctx_clip.minicpmv_version, false); // legacy

            get_u32(KEY_N_EMBD,   hparams.n_embd);
            get_u32(KEY_N_HEAD,   hparams.n_head);
            get_u32(KEY_N_FF,     hparams.n_ff);
            get_u32(KEY_N_BLOCK,  hparams.n_layer);
            get_u32(KEY_PROJ_DIM, hparams.projection_dim);
            get_f32(KEY_LAYER_NORM_EPS, hparams.eps);
            get_u32(KEY_IMAGE_SIZE, hparams.image_size);
            get_u32(KEY_PATCH_SIZE, hparams.patch_size);
            get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
            get_arr_int(KEY_IMAGE_GRID_PINPOINTS, hparams.image_grid_pinpoints, false);

            // default warmup value
            hparams.warmup_image_size = hparams.image_size;

            ctx_clip.has_llava_projector = ctx_clip.proj_type == PROJECTOR_TYPE_MLP
                                        || ctx_clip.proj_type == PROJECTOR_TYPE_MLP_NORM
                                        || ctx_clip.proj_type == PROJECTOR_TYPE_LDP
                                        || ctx_clip.proj_type == PROJECTOR_TYPE_LDPV2;

            {
                bool use_gelu = false;
                bool use_silu = false;
                get_bool(KEY_USE_GELU, use_gelu, false);
                get_bool(KEY_USE_SILU, use_silu, false);
                if (use_gelu && use_silu) {
                    throw std::runtime_error(string_format("%s: both use_gelu and use_silu are set to true\n", __func__));
                }
                if (use_gelu) {
                    hparams.ffn_op = FFN_GELU;
                    log_ffn_op = "gelu";
                } else if (use_silu) {
                    hparams.ffn_op = FFN_SILU;
                    log_ffn_op = "silu";
                } else {
                    hparams.ffn_op = FFN_GELU_QUICK;
                    log_ffn_op = "gelu_quick";
                }
            }
            {
                std::string mm_patch_merge_type;
                get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
                if (mm_patch_merge_type == "spatial_unpad") {
                    hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
                }
            }

            {
                int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
                int idx_std  = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
                GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
                GGML_ASSERT(idx_std >= 0  && "image_std not found");
                const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
                const float * std_data  = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
                for (int i = 0; i < 3; ++i) {
                    ctx_clip.image_mean[i] = mean_data[i];
                    ctx_clip.image_std[i]  = std_data[i];
                }
            }

            // Load the vision feature layer indices if they are explicitly provided;
            // if multiple vision feature layers are present, the values will be concatenated
            // to form the final visual features.
            // NOTE: gguf conversions should standardize the values of the vision feature layer to
            // be non-negative, since we use -1 to mark values as unset here.
            std::vector<int> vision_feature_layer;
            get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
            // convert std::vector to std::unordered_set
            for (auto & layer : vision_feature_layer) {
                hparams.vision_feature_layer.insert(layer);
            }

            // model-specific params
            switch (ctx_clip.proj_type) {
                case PROJECTOR_TYPE_MINICPMV:
                    {
                        if (ctx_clip.minicpmv_version == 0) {
                            ctx_clip.minicpmv_version = 2; // default to 2 if not set
                        }
                    } break;
                case PROJECTOR_TYPE_IDEFICS3:
                case PROJECTOR_TYPE_INTERNVL:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_PIXTRAL:
                    {
                        hparams.rope_theta = 10000.0f;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.spatial_merge_size, false);
                    } break;
                case PROJECTOR_TYPE_GEMMA3:
                    {
                        // default value (used by all model sizes in the gemma 3 family)
                        // the number of patches for each **side** is reduced by a factor of 4
                        hparams.proj_scale_factor = 4;
                        // the test model (tinygemma3) has a different value, so we optionally read it
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_QWEN2VL:
                    {
                        // max image size = sqrt(max_pixels) = 3584
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct/blob/main/preprocessor_config.json
                        // however, the model uses an unreasonable amount of memory past a size of 1024,
                        // so we force it to 1024; otherwise it is unusable
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
                        hparams.image_size = 1024;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                    } break;
                case PROJECTOR_TYPE_QWEN25VL:
                    {
                        // max image size = sqrt(max_pixels)
                        // ref: https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/blob/main/preprocessor_config.json
                        // however, the model uses an unreasonable amount of memory past a size of 1024,
                        // so we force it to 1024; otherwise it is unusable
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
                        hparams.image_size = 1024;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern);
                    } break;
                default:
                    break;
            }

            LOG_INF("%s: projector:          %s\n", __func__, proj_type.c_str());
            LOG_INF("%s: n_embd:             %d\n", __func__, hparams.n_embd);
            LOG_INF("%s: n_head:             %d\n", __func__, hparams.n_head);
            LOG_INF("%s: n_ff:               %d\n", __func__, hparams.n_ff);
            LOG_INF("%s: n_layer:            %d\n", __func__, hparams.n_layer);
            LOG_INF("%s: projection_dim:     %d\n", __func__, hparams.projection_dim);
            LOG_INF("%s: image_size:         %d\n", __func__, hparams.image_size);
            LOG_INF("%s: patch_size:         %d\n", __func__, hparams.patch_size);
            LOG_INF("\n");
            LOG_INF("%s: has_llava_proj:     %d\n", __func__, ctx_clip.has_llava_projector);
            LOG_INF("%s: minicpmv_version:   %d\n", __func__, ctx_clip.minicpmv_version);
            LOG_INF("%s: proj_scale_factor:  %d\n", __func__, hparams.proj_scale_factor);
            LOG_INF("%s: n_wa_pattern:       %d\n", __func__, hparams.n_wa_pattern);
            LOG_INF("%s: ffn_op:             %s\n", __func__, log_ffn_op.c_str());
            LOG_INF("%s: model size:         %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size:      %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
        }
    }
    void load_tensors() {
        auto & hparams = ctx_clip.vision_model.hparams;
        std::map<std::string, size_t> tensor_offset;
        std::vector<ggml_tensor *> tensors_to_load;

        // get offsets
        for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
            const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
            tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
        }

        // create data context
        struct ggml_init_params params = {
            /*.mem_size =*/ (gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc =*/ true,
        };
        ctx_clip.ctx_data.reset(ggml_init(params));
        if (!ctx_clip.ctx_data) {
            throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
        }

        // helper function
        auto get_tensor = [&](const std::string & name, bool required = true) {
            ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
            if (!cur && required) {
                throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
            }
            if (cur) {
                tensors_to_load.push_back(cur);
                // add tensors to context
                ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
                ggml_set_name(data_tensor, cur->name);
                cur = data_tensor;
            }
            return cur;
        };

        auto & vision_model = ctx_clip.vision_model;

        vision_model.class_embedding = get_tensor(TN_CLASS_EMBD, false);

        vision_model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, "v", "weight"), false);
        vision_model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, "v", "bias"), false);

        vision_model.post_ln_w = get_tensor(string_format(TN_LN_POST, "v", "weight"), false);
        vision_model.post_ln_b = get_tensor(string_format(TN_LN_POST, "v", "bias"), false);

        vision_model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
        vision_model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false);
        vision_model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);

        vision_model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, "v"), false);

        // layers
        vision_model.layers.resize(hparams.n_layer);
        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = vision_model.layers[il];
            layer.k_w    = get_tensor(string_format(TN_ATTN_K,      "v", il, "weight"));
            layer.q_w    = get_tensor(string_format(TN_ATTN_Q,      "v", il, "weight"));
            layer.v_w    = get_tensor(string_format(TN_ATTN_V,      "v", il, "weight"));
            layer.o_w    = get_tensor(string_format(TN_ATTN_OUTPUT, "v", il, "weight"));
            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, "v", il, "weight"), false);
            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, "v", il, "weight"), false);
            layer.ln_1_w = get_tensor(string_format(TN_LN_1,        "v", il, "weight"), false);
            layer.ln_2_w = get_tensor(string_format(TN_LN_2,        "v", il, "weight"), false);
            layer.ls_1_w = get_tensor(string_format(TN_LS_1,        "v", il, "weight"), false); // no bias
            layer.ls_2_w = get_tensor(string_format(TN_LS_2,        "v", il, "weight"), false); // no bias
            layer.k_b    = get_tensor(string_format(TN_ATTN_K,      "v", il, "bias"), false);
            layer.q_b    = get_tensor(string_format(TN_ATTN_Q,      "v", il, "bias"), false);
            layer.v_b    = get_tensor(string_format(TN_ATTN_V,      "v", il, "bias"), false);
            layer.o_b    = get_tensor(string_format(TN_ATTN_OUTPUT, "v", il, "bias"), false);
            layer.ln_1_b = get_tensor(string_format(TN_LN_1,        "v", il, "bias"), false);
            layer.ln_2_b = get_tensor(string_format(TN_LN_2,        "v", il, "bias"), false);

            // ffn
            layer.ff_up_w   = get_tensor(string_format(TN_FFN_UP,   "v", il, "weight"));
            layer.ff_up_b   = get_tensor(string_format(TN_FFN_UP,   "v", il, "bias"), false);
            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, "v", il, "weight"), false);
            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, "v", il, "bias"), false);
            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, "v", il, "weight"));
            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, "v", il, "bias"), false);

            // some models were exported with legacy (incorrect) naming, which is quite messy; fix it here
            // note: a Qwen model converted from the old surgery script has n_ff = 0, so we cannot use n_ff for the check!
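            // detection: a correctly exported ff_down_w has ne[0] == n_ff, so ne[0] == n_embd
            // indicates that the up/down weights were swapped at conversion time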
            if (layer.ff_up_w && layer.ff_down_w && layer.ff_down_w->ne[0] == hparams.n_embd) {
                // swap up and down weights
                ggml_tensor * tmp = layer.ff_up_w;
                layer.ff_up_w = layer.ff_down_w;
                layer.ff_down_w = tmp;
                // swap up and down biases
                tmp = layer.ff_up_b;
                layer.ff_up_b = layer.ff_down_b;
                layer.ff_down_b = tmp;
            }
        }

        switch (ctx_clip.proj_type) {
            case PROJECTOR_TYPE_MLP:
            case PROJECTOR_TYPE_MLP_NORM:
                {
                    // LLaVA projection
                    vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
                    vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
                    // Yi-type llava
                    vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
                    vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    // missing in Yi-type llava
                    vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
                    vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // Yi-type llava
                    vision_model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
                    vision_model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
                    vision_model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
                    vision_model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
                    if (vision_model.mm_3_w) {
                        // TODO: this is a hack to support the Yi-type llava
                        ctx_clip.proj_type = PROJECTOR_TYPE_MLP_NORM;
                    }
                    vision_model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
                } break;
            case PROJECTOR_TYPE_LDP:
                {
                    // MobileVLM projection
                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    vision_model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    vision_model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                    vision_model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
                    vision_model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
                    vision_model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
                    vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
                    vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
                    vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
                    vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
                    vision_model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
                    vision_model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
                    vision_model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
                    vision_model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
                    vision_model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
                    vision_model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
                    vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
                    vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
                    vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
                    vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
                    vision_model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
                    vision_model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
                    vision_model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
                } break;
            case PROJECTOR_TYPE_LDPV2:
                {
                    // MobileVLM v2 projection
                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    vision_model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                    vision_model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
                    vision_model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
                    vision_model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
                } break;
            case PROJECTOR_TYPE_MINICPMV:
                {
                    // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
                    vision_model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
                    vision_model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
                    vision_model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
                    vision_model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
                    vision_model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
                    vision_model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
                    vision_model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
                    vision_model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
                    vision_model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
                    vision_model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
                    vision_model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
                    vision_model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
                    vision_model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
                    vision_model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
                    vision_model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
                    vision_model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
                } break;
            case PROJECTOR_TYPE_GLM_EDGE:
                {
                    vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                    vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
                    vision_model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
                    vision_model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2VL:
            case PROJECTOR_TYPE_QWEN25VL:
                {
                    vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_GEMMA3:
                {
                    vision_model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
                    vision_model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
                } break;
            case PROJECTOR_TYPE_IDEFICS3:
                {
                    vision_model.projection = get_tensor(TN_MM_PROJECTOR);
                } break;
            case PROJECTOR_TYPE_PIXTRAL:
                {
                    vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // [IMG_BREAK] token embedding
                    vision_model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
                    // for mistral small 3.1
                    vision_model.mm_input_norm_w   = get_tensor(TN_MM_INP_NORM, false);
                    vision_model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
                } break;
            case PROJECTOR_TYPE_INTERNVL:
                {
                    vision_model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    vision_model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    vision_model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    vision_model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    vision_model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    vision_model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                } break;
            default:
                GGML_ASSERT(false && "unknown projector type");
        }
        // load data
        {
            std::vector<uint8_t> read_buf;

            auto fin = std::ifstream(fname, std::ios::binary);
            if (!fin) {
                throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
            }

            // alloc memory and offload data
            ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
            ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
            ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
            for (auto & t : tensors_to_load) {
                ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
                const size_t offset = tensor_offset[t->name];
                fin.seekg(offset, std::ios::beg);
                if (!fin) {
                    throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
                }
                size_t num_bytes = ggml_nbytes(cur);
                if (ggml_backend_buft_is_host(buft)) {
                    // for the CPU and Metal backend, we can read directly into the tensor
                    fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
                } else {
                    // read into a temporary buffer first, then copy to device memory
                    read_buf.resize(num_bytes);
                    fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
                    ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
                }
            }
            fin.close();

            LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
        }
    }

    void alloc_compute_meta() {
        ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());

        // create a fake batch
        clip_image_f32_batch batch;
        clip_image_f32_ptr img(clip_image_f32_init());
        img->nx = ctx_clip.vision_model.hparams.warmup_image_size;
        img->ny = ctx_clip.vision_model.hparams.warmup_image_size;
        img->buf.resize(img->nx * img->ny * 3);
        batch.entries.push_back(std::move(img));

        ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
        ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);

        for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
            ggml_backend_t backend = ctx_clip.backend_ptrs[i];
            ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
            size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
            if (size > 1) {
                LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                    ggml_backend_buft_name(buft),
                    size / 1024.0 / 1024.0);
            }
        }
    }
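    // gguf KV accessors: when `required` is true a missing key throws; otherwise the
    // output argument is left untouched, so callers can pre-set a default value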
    void get_bool(const std::string & key, bool & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_bool(ctx_gguf.get(), i);
    }

    void get_i32(const std::string & key, int & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_i32(ctx_gguf.get(), i);
    }

    void get_u32(const std::string & key, int & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_u32(ctx_gguf.get(), i);
    }

    void get_f32(const std::string & key, float & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_f32(ctx_gguf.get(), i);
    }

    void get_string(const std::string & key, std::string & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
    }

    void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        int n = gguf_get_arr_n(ctx_gguf.get(), i);
        output.resize(n);
        const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
        for (int j = 0; j < n; ++j) { // note: do not shadow the key index `i` above
            output[j] = values[j];
        }
    }
};

// read and create ggml_context containing the tensors and their data
struct clip_ctx * clip_model_load(const char * fname, const int verbosity) {
    return clip_init(fname, clip_context_params{
        /* use_gpu   */ true,
        /* verbosity */ static_cast<ggml_log_level>(verbosity),
    });
}

struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params) {
    g_logger_state.verbosity_thold = ctx_params.verbosity;
    clip_ctx * ctx_clip = nullptr;
    try {
        ctx_clip = new clip_ctx(ctx_params);
        clip_model_loader loader(fname, *ctx_clip);
        loader.load_hparams();
        loader.load_tensors();
        loader.alloc_compute_meta();
    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
        delete ctx_clip;
        return nullptr;
    }
    return ctx_clip;
}

void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size) {
    ctx_clip->load_image_size = *load_image_size; // copy
}

struct clip_image_size * clip_get_load_image_size(struct clip_ctx * ctx_clip) {
    return &ctx_clip->load_image_size;
}

struct clip_image_size * clip_image_size_init() {
    struct clip_image_size * load_image_size = new struct clip_image_size();
    load_image_size->width = 448;
    load_image_size->height = 448;
    return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
    return new clip_image_u8();
}

struct clip_image_f32 * clip_image_f32_init() {
    return new clip_image_f32();
}

struct clip_image_f32_batch * clip_image_f32_batch_init() {
    return new clip_image_f32_batch();
}

unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
    if (nx) *nx = img->nx;
    if (ny) *ny = img->ny;
    return img->buf.data();
}

void clip_image_size_free(struct clip_image_size * load_image_size) {
    if (load_image_size == nullptr) {
        return;
    }
    delete load_image_size;
}
void clip_image_u8_free(struct clip_image_u8 * img) { if (img) delete img; }
void clip_image_f32_free(struct clip_image_f32 * img) { if (img) delete img; }
void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { if (batch) delete batch; }
void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { if (batch) delete batch; }

size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
    return batch->entries.size();
}

size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->nx;
}

size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->ny;
}

clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return nullptr;
    }
    return batch->entries[idx].get();
}

void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
    img->nx = nx;
    img->ny = ny;
    img->buf.resize(3 * nx * ny);
    memcpy(img->buf.data(), rgb_pixels, img->buf.size());
}

bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
    int nx, ny, nc;
    auto * data = stbi_load(fname, &nx, &ny, &nc, 3);
    if (!data) {
        LOG_ERR("%s: failed to load image '%s'\n", __func__, fname);
        return false;
    }
    clip_build_img_from_pixels(data, nx, ny, img);
    stbi_image_free(data);
    return true;
}

bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
    int nx, ny, nc;
    auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
    if (!data) {
        LOG_ERR("%s: failed to decode image bytes\n", __func__);
        return false;
    }
    clip_build_img_from_pixels(data, nx, ny, img);
    stbi_image_free(data);
    return true;
}

// Normalize the image to float32 - be careful with pytorch .to(model.device, dtype=torch.float16):
// this sometimes reduces precision (32 -> 16 -> 32), sometimes not
static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(src.buf.size());

    // TODO @ngxson : seems like this could be done more efficiently on cgraph
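    // per channel c: dst = (src / 255 - mean[c]) / std[c]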
    for (size_t i = 0; i < src.buf.size(); ++i) {
        int c = i % 3; // rgb
        dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
    }
}

// set of tools to manipulate images
// in the future, we can have HW acceleration by allowing this struct to access 3rd party libs like imagick or opencv
struct image_manipulation {
    // Bilinear resize function
    static void bilinear_resize(const clip_image_u8 & src, clip_image_u8 & dst, int target_width, int target_height) {
        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float x_ratio = static_cast<float>(src.nx - 1) / target_width;
        float y_ratio = static_cast<float>(src.ny - 1) / target_height;
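        // for each output pixel, sample the 2x2 source neighborhood around (px, py):
        // two horizontal lerps (top and bottom rows) followed by one vertical lerp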
        for (int y = 0; y < target_height; y++) {
            for (int x = 0; x < target_width; x++) {
                float px = x_ratio * x;
                float py = y_ratio * y;
                int x_floor = static_cast<int>(px);
                int y_floor = static_cast<int>(py);
                float x_lerp = px - x_floor;
                float y_lerp = py - y_floor;

                for (int c = 0; c < 3; c++) {
                    float top = lerp(
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    float bottom = lerp(
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
                }
            }
        }
    }

    // Bicubic resize function
    // part of the image will be cropped if the aspect ratio is different
    static bool bicubic_resize(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
        const int nx = img.nx;
        const int ny = img.ny;

        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float Cc;
        float C[5];
        float d0, d2, d3, a0, a1, a2, a3;
        int i, j, k, jj;
        int x, y;
        float dx, dy;
        float tx, ty;

        tx = (float)nx / (float)target_width;
        ty = (float)ny / (float)target_height;

        // Bicubic interpolation; adapted from ViT.cpp, inspired by:
        // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
        // -> https://en.wikipedia.org/wiki/Bicubic_interpolation
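        // for each output pixel: fit a cubic through 4 horizontal neighbors in each of the
        // 4 surrounding rows (C[0..3]), then fit a final cubic vertically through those row
        // values (the vertical fit is recomputed on every jj iteration; only the last matters)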
        for (i = 0; i < target_height; i++) {
            for (j = 0; j < target_width; j++) {
                x = (int)(tx * j);
                y = (int)(ty * i);

                dx = tx * j - x;
                dy = ty * i - y;

                for (k = 0; k < 3; k++) {
                    for (jj = 0; jj <= 3; jj++) {
                        d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];

                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                        C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
                    }

                    // interpolate the four row results along y
                    // (hoisted out of the jj loop: only the pass with all of C[0..3]
                    // computed contributes to the output, so behavior is unchanged)
                    d0 = C[0] - C[1];
                    d2 = C[2] - C[1];
                    d3 = C[3] - C[1];
                    a0 = C[1];

                    a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                    a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                    a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                    Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;

                    const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
                    dst.buf[(i * target_width + j) * 3 + k] = Cc2;
                }
            }
        }

        return true;
    }

    // llava-1.6 style resize_and_pad
    // if the aspect ratio is not 1:1, padding with pad_color will be applied
    // pad_color is an RGB triple, default is {0, 0, 0} (black)
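    // The image is scaled by min(target_w / w, target_h / h) so it fits entirely
    // inside the target resolution, then centered on a pad_color background.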
    static void resize_and_pad_image(const clip_image_u8 & image, clip_image_u8 & dst, const clip_image_size & target_resolution, std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
        int target_width  = target_resolution.width;
        int target_height = target_resolution.height;

        float scale_w = static_cast<float>(target_width) / image.nx;
        float scale_h = static_cast<float>(target_height) / image.ny;

        int new_width, new_height;

        if (scale_w < scale_h) {
            new_width  = target_width;
            new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
        } else {
            new_height = target_height;
            new_width  = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
        }

        clip_image_u8 resized_image;
        bicubic_resize(image, resized_image, new_width, new_height);

        clip_image_u8 padded_image;
        padded_image.nx = target_width;
        padded_image.ny = target_height;
        padded_image.buf.resize(3 * target_width * target_height);

        // Fill the padded image with the pad color
        for (size_t i = 0; i < padded_image.buf.size(); i += 3) {
            padded_image.buf[i]     = pad_color[0];
            padded_image.buf[i + 1] = pad_color[1];
            padded_image.buf[i + 2] = pad_color[2];
        }

        // Calculate padding offsets
        int pad_x = (target_width  - new_width)  / 2;
        int pad_y = (target_height - new_height) / 2;

        // Copy the resized image into the center of the padded buffer
        for (int y = 0; y < new_height; ++y) {
            for (int x = 0; x < new_width; ++x) {
                for (int c = 0; c < 3; ++c) {
                    padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
                }
            }
        }
        dst = std::move(padded_image);
    }

    static void crop_image(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
        dst.nx = w;
        dst.ny = h;
        dst.buf.resize(3 * w * h);

        for (int i = 0; i < h; ++i) {
            for (int j = 0; j < w; ++j) {
                int src_idx = 3 * ((y + i) * image.nx + (x + j));
                int dst_idx = 3 * (i * w + j);
                dst.buf[dst_idx]     = image.buf[src_idx];
                dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
            }
        }
    }

    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size will be aligned to the nearest multiple of align_size
    // if H or W is larger than max_dimension, the image will be scaled down to fit within max_dimension
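    //
    // Worked example (assuming CLIP_ALIGN rounds up to the nearest multiple):
    // inp 1000x750 with align_size 28 and max_dimension 1024 gives scale = 1.0,
    // then alignment yields 1008x756 (the next multiples of 28).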
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int max_dimension) {
        if (inp_size.width <= 0 || inp_size.height <= 0 || align_size <= 0 || max_dimension <= 0) {
            return {0, 0};
        }

        float scale = std::min(1.0f, std::min(static_cast<float>(max_dimension) / inp_size.width,
                                              static_cast<float>(max_dimension) / inp_size.height));

        float target_width_f  = static_cast<float>(inp_size.width)  * scale;
        float target_height_f = static_cast<float>(inp_size.height) * scale;

        int aligned_width  = CLIP_ALIGN((int)target_width_f,  align_size);
        int aligned_height = CLIP_ALIGN((int)target_height_f, align_size);

        return {aligned_width, aligned_height};
    }

private:
    static inline int clip(int x, int lower, int upper) {
        return std::max(lower, std::min(x, upper));
    }

    // Linear interpolation between two points
    static inline float lerp(float s, float e, float t) {
        return s + (e - s) * t;
    }
};

/**
 * implementation of LLaVA-UHD:
 *  - https://arxiv.org/pdf/2403.11703
 *  - https://github.com/thunlp/LLaVA-UHD
 *  - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
 *
 * overview:
 *  - an image always has a single overview (downscaled image)
 *  - an image can have 0 or multiple slices, depending on the image size
 *  - each slice can then be considered as a separate image
 *
 * for example:
 *
 * [overview] --> [slice 1] --> [slice 2]
 *                    |             |
 *                    +--> [slice 3] --> [slice 4]
 */
struct llava_uhd {
    struct slice_coordinates {
        int x;
        int y;
        clip_image_size size;
    };

    struct slice_instructions {
        clip_image_size overview_size; // size of downscaled image
        clip_image_size refined_size;  // size of image right before slicing (must be multiple of slice size)
        clip_image_size grid_size;     // grid_size.width * grid_size.height = number of slices
        std::vector<slice_coordinates> slices;
        bool padding_refined = false;  // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
    };

    static int get_max_slices(struct clip_ctx * ctx) {
        if (clip_is_minicpmv(ctx)) {
            return 9;
        }
        return 0;
    }

    static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
        slice_instructions res;

        const int patch_size     = clip_get_patch_size(ctx);
        const int slice_size     = clip_get_image_size(ctx);
        const int max_slice_nums = get_max_slices(ctx);

        const int original_width  = original_size.width;
        const int original_height = original_size.height;

        const float log_ratio = log((float)original_width / original_height);
        const float ratio     = (float)original_width * original_height / (slice_size * slice_size);
        const int   multiple  = fmin(ceil(ratio), max_slice_nums);

        const bool has_slices    = (multiple > 1);
        const bool has_pinpoints = !ctx->vision_model.hparams.image_grid_pinpoints.empty();

        if (has_pinpoints) {
            // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
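            // Illustrative example (hypothetical numbers): with slice_size 336 and a
            // best-fit refined size of 672x672, the loop below emits a 2x2 grid of
            // four 336x336 slices, in addition to the single overview image.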
            auto refine_size = llava_uhd::select_best_resolution(
                ctx->vision_model.hparams.image_grid_pinpoints,
                original_size);

            res.overview_size   = clip_image_size{slice_size, slice_size};
            res.refined_size    = refine_size;
            res.grid_size       = clip_image_size{0, 0};
            res.padding_refined = true;

            for (int y = 0; y < refine_size.height; y += slice_size) {
                for (int x = 0; x < refine_size.width; x += slice_size) {
                    slice_coordinates slice;
                    slice.x = x;
                    slice.y = y;
                    slice.size.width  = std::min(slice_size, refine_size.width  - x);
                    slice.size.height = std::min(slice_size, refine_size.height - y);
                    res.slices.push_back(slice);
                    if (x == 0) {
                        res.grid_size.width++;
                    }
                }
                res.grid_size.height++;
            }

            return res;
        }

        // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)
        auto best_size    = get_best_resize(original_size, slice_size, patch_size, !has_slices);
        res.overview_size = best_size;

        if (!has_slices) {
            // skip slicing logic
            res.refined_size = clip_image_size{0, 0};
            res.grid_size    = clip_image_size{0, 0};
        } else {
            auto best_grid   = get_best_grid(max_slice_nums, multiple, log_ratio);
            auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);

            res.grid_size    = best_grid;
            res.refined_size = refine_size;

            int width  = refine_size.width;
            int height = refine_size.height;
            int grid_x = int(width  / best_grid.width);
            int grid_y = int(height / best_grid.height);

            for (int patches_y = 0, ic = 0;
                 patches_y < refine_size.height && ic < best_grid.height;
                 patches_y += grid_y, ic += 1) {
                for (int patches_x = 0, jc = 0;
                     patches_x < refine_size.width && jc < best_grid.width;
                     patches_x += grid_x, jc += 1) {
                    slice_coordinates slice;
                    slice.x = patches_x;
                    slice.y = patches_y;
                    slice.size.width  = grid_x;
                    slice.size.height = grid_y;
                    res.slices.push_back(slice);
                    // LOG_INF("slice %d: %d %d %d %d\n", ic, patches_i, patches_j, grid_x, grid_y);
                }
            }
        }

        return res;
    }

    static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
        std::vector<clip_image_u8_ptr> output;

        // resize to overview size
        clip_image_u8_ptr resized_img(clip_image_u8_init());
        image_manipulation::bicubic_resize(*img, *resized_img, inst.overview_size.width, inst.overview_size.height);
        output.push_back(std::move(resized_img));
        if (inst.slices.empty()) {
            // no slices, just return the resized image
            return output;
        }

        // resize to refined size
        clip_image_u8_ptr refined_img(clip_image_u8_init());
        if (inst.padding_refined) {
            image_manipulation::resize_and_pad_image(*img, *refined_img, inst.refined_size);
        } else {
            image_manipulation::bilinear_resize(*img, *refined_img, inst.refined_size.width, inst.refined_size.height);
        }

        // create slices
        for (const auto & slice : inst.slices) {
            int x = slice.x;
            int y = slice.y;
            int w = slice.size.width;
            int h = slice.size.height;

            clip_image_u8_ptr img_slice(clip_image_u8_init());
            image_manipulation::crop_image(*refined_img, *img_slice, x, y, w, h);
            output.push_back(std::move(img_slice));
        }

        return output;
    }

private:
    static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
            float r = static_cast<float>(width) / height;
            height  = static_cast<int>(scale_resolution / std::sqrt(r));
            width   = static_cast<int>(height * r);
        }
        clip_image_size res;
        res.width  = ensure_divide(width,  patch_size);
        res.height = ensure_divide(height, patch_size);
        return res;
    }

    /**
     * Selects the best resolution from a list of possible resolutions based on the original size.
     *
     * @param original_size The original size of the image
     * @param possible_resolutions A list of possible resolutions
     * @return The best fit resolution
     */
    static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
        int original_width  = original_size.width;
        int original_height = original_size.height;

        clip_image_size best_fit;
        int max_effective_resolution = 0;
        int min_wasted_resolution    = std::numeric_limits<int>::max();

        for (const auto & resolution : possible_resolutions) {
            int width  = resolution.width;
            int height = resolution.height;
            float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
            int downscaled_width  = static_cast<int>(original_width  * scale);
            int downscaled_height = static_cast<int>(original_height * scale);
            int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
            int wasted_resolution = (width * height) - effective_resolution;
            // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
            if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
                max_effective_resolution = effective_resolution;
                min_wasted_resolution    = wasted_resolution;
                best_fit = resolution;
            }
        }

        return best_fit;
    }

    // used by llava 1.6 with custom list of pinpoints
    static clip_image_size select_best_resolution(const std::vector<int32_t> & pinpoints, const clip_image_size & original_size) {
        std::vector<clip_image_size> possible_resolutions;
        for (size_t i = 0; i < pinpoints.size(); i += 2) {
            possible_resolutions.push_back(clip_image_size{pinpoints[i], pinpoints[i + 1]});
        }
        return select_best_resolution(original_size, possible_resolutions);
    }

    static int ensure_divide(int length, int patch_size) {
        return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
    }

    static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        int grid_x = grid.width;
        int grid_y = grid.height;

        int refine_width  = ensure_divide(width,  grid_x);
        int refine_height = ensure_divide(height, grid_y);

        clip_image_size grid_size;
        grid_size.width  = refine_width  / grid_x;
        grid_size.height = refine_height / grid_y;

        auto best_grid_size  = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
        int best_grid_width  = best_grid_size.width;
        int best_grid_height = best_grid_size.height;

        clip_image_size refine_size;
        refine_size.width  = best_grid_width  * grid_x;
        refine_size.height = best_grid_height * grid_y;
        return refine_size;
    }
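
    // Chooses the slice grid (w x h, with w*h slices) whose aspect ratio is closest
    // to the image's in log space. E.g. for multiple = 4, the candidate slice counts
    // are {3, 4, 5}; each count is factored into every (m, count/m) grid, and a wide
    // image would favor something like 4x1 over 2x2.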
    static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
        std::vector<int> candidate_split_grids_nums;
        for (int i : {multiple - 1, multiple, multiple + 1}) {
            if (i == 1 || i > max_slice_nums) {
                continue;
            }
            candidate_split_grids_nums.push_back(i);
        }

        std::vector<clip_image_size> candidate_grids;
        for (int split_grids_nums : candidate_split_grids_nums) {
            int m = 1;
            while (m <= split_grids_nums) {
                if (split_grids_nums % m == 0) {
                    candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
                }
                ++m;
            }
        }

        clip_image_size best_grid{1, 1};
        float min_error = std::numeric_limits<float>::infinity();

        for (const auto & grid : candidate_grids) {
            float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
            if (error < min_error) {
                best_grid = grid;
                min_error = error;
            }
        }
        return best_grid;
    }
};

// TODO @ngxson : deprecate the load_image_size singleton pattern
int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) {
    const auto inst = llava_uhd::get_slice_instructions(ctx_clip, ctx_clip->load_image_size);
    return inst.grid_size.width;
}

// returns the normalized float tensor for llava-1.5; for spatial_unpad with anyres processing (llava-1.6) it returns the normalized image patch tensors as a vector
// res_imgs memory is being allocated here, previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
    clip_image_size original_size{img->nx, img->ny};
    bool pad_to_square = true;
    auto & params = ctx->vision_model.hparams;
    // The model config actually contains all we need to decide on how to preprocess; here we automatically switch to the new llava-1.6 preprocessing
    if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) {
        pad_to_square = false;
    }

    if (clip_is_minicpmv(ctx)) {
        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

        for (size_t i = 0; i < imgs.size(); ++i) {
            // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
            res_imgs->entries.push_back(std::move(res));
        }
        return true;
    }
    else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
        clip_image_u8 resized;
        auto patch_size = params.patch_size * 2;
        auto new_size   = image_manipulation::calc_size_preserved_ratio(original_size, patch_size, params.image_size);
        image_manipulation::bicubic_resize(*img, resized, new_size.width, new_size.height);

        clip_image_f32_ptr img_f32(clip_image_f32_init());
        normalize_image_u8_to_f32(resized, *img_f32, ctx->image_mean, ctx->image_std);
        res_imgs->entries.push_back(std::move(img_f32));
        return true;
    }
    else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE
            || ctx->proj_type == PROJECTOR_TYPE_GEMMA3
            || ctx->proj_type == PROJECTOR_TYPE_IDEFICS3
            || ctx->proj_type == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution
            ) {
        clip_image_u8 resized_image;
        int sz = params.image_size;
        image_manipulation::resize_and_pad_image(*img, resized_image, {sz, sz});
        clip_image_f32_ptr img_f32(clip_image_f32_init());
        // clip_image_save_to_bmp(resized_image, "resized.bmp");
        normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
        res_imgs->entries.push_back(std::move(img_f32));
        return true;
    }
    else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) {
        clip_image_u8 resized_image;
        auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, params.patch_size, params.image_size);
        image_manipulation::bilinear_resize(*img, resized_image, new_size.width, new_size.height);
        clip_image_f32_ptr img_f32(clip_image_f32_init());
        normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
        res_imgs->entries.push_back(std::move(img_f32));
        return true;
    }

    // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
    // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156

    clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily

    if (pad_to_square) {
        // for llava-1.5, we resize the image to a square, and pad the shorter side with a background color
        const int longer_side = std::max(img->nx, img->ny);
        temp->nx = longer_side;
        temp->ny = longer_side;
        temp->buf.resize(3 * longer_side * longer_side);

        // background color in RGB from LLaVA (this is the mean rgb color * 255)
        const std::array<uint8_t, 3> pad_color = {122, 116, 104};

        // resize the image to the target size
        image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);

        clip_image_f32_ptr res(clip_image_f32_init());
        normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
        res_imgs->entries.push_back(std::move(res));
        return true;

    } else if (!params.image_grid_pinpoints.empty()) {
        // "spatial_unpad" with "anyres" processing for llava-1.6
        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

        for (size_t i = 0; i < imgs.size(); ++i) {
            // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
            res_imgs->entries.push_back(std::move(res));
        }
        return true;
    }

    GGML_ASSERT(false && "Unknown image preprocessing type");
}

ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
    return ctx->vision_model.image_newline;
}

void clip_free(clip_ctx * ctx) {
    if (ctx == nullptr) {
        return;
    }
    delete ctx;
}

// deprecated
size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
    const int32_t nx = ctx->vision_model.hparams.image_size;
    const int32_t ny = ctx->vision_model.hparams.image_size;
    return clip_embd_nbytes_by_img(ctx, nx, ny);
}

size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h) {
    clip_image_f32 img;
    img.nx = img_w;
    img.ny = img_h;
    return clip_n_output_tokens(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
}

int32_t clip_get_image_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.image_size;
}

int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.patch_size;
}

int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.n_embd;
}

const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
}

const int32_t * clip_image_grid(const struct clip_ctx * ctx) {
    if (ctx->vision_model.hparams.image_grid_pinpoints.size()) {
        return &ctx->vision_model.hparams.image_grid_pinpoints.front();
    }
    return nullptr;
}

size_t get_clip_image_grid_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.image_grid_pinpoints.size();
}

// deprecated
int clip_n_patches(const struct clip_ctx * ctx) {
    clip_image_f32 img;
    img.nx = ctx->vision_model.hparams.image_size;
    img.ny = ctx->vision_model.hparams.image_size;
    return clip_n_output_tokens(ctx, &img);
}

// deprecated
int clip_n_patches_by_img(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    return clip_n_output_tokens(ctx, img);
}

int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->vision_model.hparams;
    const int n_total = clip_n_output_tokens(ctx, img);
    if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
        // round up by the merged (2x2) patch size, matching clip_n_output_tokens
        return img->nx / (params.patch_size * 2) + (int)(img->nx % (params.patch_size * 2) > 0);
    }
    return n_total;
}

int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->vision_model.hparams;
    if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
        return img->ny / (params.patch_size * 2) + (int)(img->ny % (params.patch_size * 2) > 0);
    }
    return 1;
}

int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->vision_model.hparams;

    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);

    if (ctx->proj_type == PROJECTOR_TYPE_LDP
            || ctx->proj_type == PROJECTOR_TYPE_LDPV2
            || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
        n_patches /= 4;
        if (ctx->vision_model.mm_glm_tok_boi) {
            n_patches += 2; // for BOI and EOI token embeddings
        }
    } else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
        if (ctx->minicpmv_version == 2) {
            n_patches = 96;
        } else if (ctx->minicpmv_version == 3) {
            n_patches = 64;
        } else if (ctx->minicpmv_version == 4) {
            n_patches = 64;
        } else {
            GGML_ABORT("Unknown minicpmv version");
        }
    } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
        int patch_size = params.patch_size * 2;
        int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
        int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
        n_patches = x_patch * y_patch;
    } else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
        int n_per_side = params.image_size / params.patch_size;
        int n_per_side_2d_pool = n_per_side / params.proj_scale_factor;
        n_patches = n_per_side_2d_pool * n_per_side_2d_pool;
    } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3 || ctx->proj_type == PROJECTOR_TYPE_INTERNVL) {
        // both W and H are divided by proj_scale_factor
        n_patches /= (params.proj_scale_factor * params.proj_scale_factor);
    } else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) {
        int n_merge = params.spatial_merge_size;
        int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1);
        int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1);
        n_patches = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
    }

    return n_patches;
}
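
// Standard (non-learned) sin/cos positional embedding, following the ViT/MAE
// convention: omega_i = 1 / 10000^(2i/d), and for a position p the embedding is
// [sin(p*omega_0), ..., sin(p*omega_{d/2-1}), cos(p*omega_0), ..., cos(p*omega_{d/2-1})].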
static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
    assert(embed_dim % 2 == 0);
    int H = pos.size();
    int W = pos[0].size();

    std::vector<float> omega(embed_dim / 2);
    for (int i = 0; i < embed_dim / 2; ++i) {
        omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
    }

    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                float out_value = pos[h][w] * omega[d];
                emb[h][w][d] = sin(out_value);
                emb[h][w][d + embed_dim / 2] = cos(out_value);
            }
        }
    }

    return emb;
}

static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
    assert(embed_dim % 2 == 0);
    std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
    std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)

    int H = emb_h.size();
    int W = emb_h[0].size();
    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));

    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                emb[h][w][d]                 = emb_h[h][w][d];
                emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
            }
        }
    }
    return emb;
}

static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
    int grid_h_size = image_size.first;
    int grid_w_size = image_size.second;

    std::vector<float> grid_h(grid_h_size);
    std::vector<float> grid_w(grid_w_size);

    for (int i = 0; i < grid_h_size; ++i) {
        grid_h[i] = static_cast<float>(i);
    }
    for (int i = 0; i < grid_w_size; ++i) {
        grid_w[i] = static_cast<float>(i);
    }

    std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid[h][w] = grid_w[w];
        }
    }
    std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid_2d[0][h][w] = grid_h[h];
            grid_2d[1][h][w] = grid_w[w];
        }
    }

    std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);

    int H = image_size.first;
    int W = image_size.second;
    std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
        }
    }

    return pos_embed_2d;
}

bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    clip_image_f32_batch imgs;
    clip_image_f32_ptr img_copy(clip_image_f32_init());
    *img_copy = *img;
    imgs.entries.push_back(std::move(img_copy));

    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}

bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
    const clip_image_f32_batch & imgs = *imgs_c_ptr;
    int batch_size = imgs.entries.size();

    // TODO @ngxson : implement batch size > 1 as a loop
    // we don't need true batching support because the cgraph is going to be big anyway
    if (batch_size != 1) {
        return false; // only support batch size of 1
    }

    // build the inference graph
    ggml_backend_sched_reset(ctx->sched.get());
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
    ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);

    // set inputs
    const auto & model   = ctx->vision_model;
    const auto & hparams = model.hparams;

    const int image_size_width  = imgs.entries[0]->nx;
    const int image_size_height = imgs.entries[0]->ny;

    const int patch_size  = hparams.patch_size;
    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int n_pos = num_patches + (model.class_embedding ? 1 : 0);
    const int pos_w = ctx->load_image_size.width  / patch_size;
    const int pos_h = ctx->load_image_size.height / patch_size;

    const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl

    auto get_inp_tensor = [&gf](const char * name) {
        ggml_tensor * inp = ggml_graph_get_tensor(gf, name);
        if (inp == nullptr) {
            GGML_ABORT("Failed to get tensor %s", name);
        }
        if (!(inp->flags & GGML_TENSOR_FLAG_INPUT)) {
            GGML_ABORT("Tensor %s is not an input tensor", name);
        }
        return inp;
    };

    auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_F32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_I32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    // set input pixel values
    {
        size_t nelem = 0;
        for (const auto & img : imgs.entries) {
            nelem += img->nx * img->ny * 3;
        }
        std::vector<float> inp_raw(nelem);

        // layout of data (note: the channel dim is unrolled to better visualize the layout):
        //
        // ┌──W──┐
        // │     H │  channel = R
        // ├─────┤ │
        // │     H │  channel = G
        // ├─────┤ │
        // │     H │  channel = B
        // └─────┘ │
        //  ───────┘ x B

        for (size_t i = 0; i < imgs.entries.size(); i++) {
            const int nx = imgs.entries[i]->nx;
            const int ny = imgs.entries[i]->ny;
            const int n  = nx * ny;

            for (int b = 0; b < batch_size; b++) {
                float * batch_entry = inp_raw.data() + b * (3 * n);
                for (int y = 0; y < ny; y++) {
                    for (int x = 0; x < nx; x++) {
                        size_t base_src = 3 * (y * nx + x); // idx of the first channel
                        size_t base_dst =      y * nx + x;  // idx of the first channel
                        batch_entry[        base_dst] = imgs.entries[b]->buf[base_src    ];
                        batch_entry[1 * n + base_dst] = imgs.entries[b]->buf[base_src + 1];
                        batch_entry[2 * n + base_dst] = imgs.entries[b]->buf[base_src + 2];
                    }
                }
            }
        }
        set_input_f32("inp_raw", inp_raw);
    }

    // set input per projector
    switch (ctx->proj_type) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                // inspired from siglip:
                //   -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
                //   -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
                std::vector<int32_t> positions(pos_h * pos_w);
                int bucket_coords_h[1024];
                int bucket_coords_w[1024];
                for (int i = 0; i < pos_h; i++) {
                    bucket_coords_h[i] = std::floor(70.0 * i / pos_h);
                }
                for (int i = 0; i < pos_w; i++) {
                    bucket_coords_w[i] = std::floor(70.0 * i / pos_w);
                }
                for (int i = 0, id = 0; i < pos_h; i++) {
                    for (int j = 0; j < pos_w; j++) {
                        positions[id++] = bucket_coords_h[i] * 70 + bucket_coords_w[j];
                    }
                }
                set_input_i32("positions", positions);

                // inspired from resampler of Qwen-VL:
                //   -> https://huggingface.co/Qwen/Qwen-VL/tree/main
                //   -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
                int embed_dim = clip_n_mmproj_embd(ctx);

                // TODO @ngxson : this is very inefficient, can we do this using ggml_sin and ggml_cos?
                auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));

                std::vector<float> pos_embed(embed_dim * pos_w * pos_h);
                for (int i = 0; i < pos_w * pos_h; ++i) {
                    for (int j = 0; j < embed_dim; ++j) {
                        pos_embed[i * embed_dim + j] = pos_embed_t[i][j];
                    }
                }

                set_input_f32("pos_embed", pos_embed);
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
            {
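                // The graph expects four concatenated position channels (M-RoPE),
                // each num_patches long; per patch they hold y, x, y, x respectively,
                // emitted in 2x2 blocks to match the merge_ratio traversal below.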
                const int merge_ratio = 2;
                const int pw = image_size_width  / patch_size;
                const int ph = image_size_height / patch_size;
                std::vector<int> positions(n_pos * 4);
                int ptr = 0;
                for (int y = 0; y < ph; y += merge_ratio) {
                    for (int x = 0; x < pw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                positions[                  ptr] = y + dy;
                                positions[    num_patches + ptr] = x + dx;
                                positions[2 * num_patches + ptr] = y + dy;
                                positions[3 * num_patches + ptr] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }

                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_QWEN25VL:
            {
                // pw * ph   = number of tokens output by ViT after applying the patch merger
                // ipw * iph = number of vision tokens processed inside ViT
                const int merge_ratio = 2;
                const int pw  = image_size_width  / patch_size / merge_ratio;
                const int ph  = image_size_height / patch_size / merge_ratio;
                const int ipw = image_size_width  / patch_size;
                const int iph = image_size_height / patch_size;

                std::vector<int> idx    (ph * pw);
                std::vector<int> inv_idx(ph * pw);

                if (use_window_attn) {
                    const int attn_window_size = 112;
                    const int grid_window = attn_window_size / patch_size / merge_ratio;
                    int dst = 0;
                    // [num_vision_tokens, num_vision_tokens] attention mask tensor
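                    // entries start at -inf (fully masked); below, the cells for tokens
                    // that share a window are set to 0 so attention stays window-local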
                    std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
                    int mask_row = 0;
                    for (int y = 0; y < ph; y += grid_window) {
                        for (int x = 0; x < pw; x += grid_window) {
                            const int win_h = std::min(grid_window, ph - y);
                            const int win_w = std::min(grid_window, pw - x);
                            const int dst_0 = dst;
                            // group all tokens belonging to the same window together (into a contiguous range)
                            for (int dy = 0; dy < win_h; dy++) {
                                for (int dx = 0; dx < win_w; dx++) {
                                    const int src = (y + dy) * pw + (x + dx);
                                    GGML_ASSERT(src < (int)idx.size());
                                    GGML_ASSERT(dst < (int)inv_idx.size());
                                    idx    [src] = dst;
                                    inv_idx[dst] = src;
                                    dst++;
                                }
                            }

                            for (int r = 0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
                                int row_offset = mask_row * (ipw * iph);
                                std::fill(
                                    mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
                                    mask.begin() + row_offset + (dst   * merge_ratio * merge_ratio),
                                    0.0);
                                mask_row++;
                            }
                        }
                    }

                    set_input_i32("window_idx", idx);
                    set_input_i32("inv_window_idx", inv_idx);
                    set_input_f32("window_mask", mask);
                } else {
                    for (int i = 0; i < ph * pw; i++) {
                        idx[i] = i;
                    }
                }

                const int mpow = merge_ratio * merge_ratio;
                std::vector<int> positions(n_pos * 4);

                int ptr = 0;
                for (int y = 0; y < iph; y += merge_ratio) {
                    for (int x = 0; x < ipw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                auto remap = idx[ptr / mpow];
                                remap = (remap * mpow) + (ptr % mpow);

                                positions[                  remap] = y + dy;
                                positions[    num_patches + remap] = x + dx;
                                positions[2 * num_patches + remap] = y + dy;
                                positions[3 * num_patches + remap] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }

                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i / n_patches_per_col;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i % n_patches_per_col;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);

                // The patches vector is used to get rows to index into the embeds with;
                // we should skip dim 0 only if we have CLS to avoid going out of bounds
                // when retrieving the rows.
                int patch_offset = model.class_embedding ? 1 : 0;
                std::vector<int32_t> patches(num_patches);
                for (int i = 0; i < num_patches; i++) {
                    patches[i] = i + patch_offset;
                }
                set_input_i32("patches", patches);
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
            {
                // do nothing
            } break;
        default:
            GGML_ABORT("Unknown projector type");
    }

    // ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
    ggml_backend_dev_t dev = ggml_backend_get_device(ctx->backend_cpu);
    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
    if (reg) {
        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (ggml_backend_set_n_threads_fn) {
            ggml_backend_set_n_threads_fn(ctx->backend_cpu, n_threads);
        }
    }

    auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
        return false;
    }

    // the last node is the embedding tensor
    ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // sanity check (only support batch size of 1 for now)
    const int n_tokens_out          = embeddings->ne[1];
    const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
    if (n_tokens_out != expected_n_tokens_out) {
        LOG_ERR("%s: expected %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
        GGML_ABORT("Invalid number of output tokens");
    }

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}

bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) {
    assert(itype < GGML_TYPE_COUNT);
    ggml_type type = static_cast<ggml_type>(itype);

    auto * ctx_clip = clip_init(fname_inp, clip_context_params{
        /* use_gpu */   false,
        /* verbosity */ GGML_LOG_LEVEL_ERROR,
    });

    const auto & ctx_src  = ctx_clip->ctx_gguf.get();
    const auto & ctx_data = ctx_clip->ctx_data.get();

    auto * ctx_out = gguf_init_empty();
    gguf_set_kv(ctx_out, ctx_src);
    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
    gguf_set_val_u32(ctx_out, "general.file_type", itype);

    auto fout = std::ofstream(fname_out, std::ios::binary);

    const int n_tensors = gguf_get_n_tensors(ctx_src);

    for (int i = 0; i < n_tensors; ++i) {
        const char * name = gguf_get_tensor_name(ctx_src, i);
        ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
        gguf_add_tensor(ctx_out, cur);
    }

    const size_t meta_size = gguf_get_meta_size(ctx_out);
    for (size_t i = 0; i < meta_size; ++i) {
        fout.put(0);
    }
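
    // The file is written in two passes: tensor data goes out first, after the
    // zeroed placeholder bytes reserved for the header above; the finalized GGUF
    // metadata (with the tensor types/sizes chosen below) is written back at
    // offset 0 at the end of this function.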

    // regexes of tensor names to be quantized
    const std::vector<std::string> k_names = {
        ".*weight",
    };

    std::vector<uint8_t> work(512);
    std::vector<float> conv_buf(512);
    size_t total_size_org = 0;
    size_t total_size_new = 0;

    for (int i = 0; i < n_tensors; ++i) {
        const std::string name = gguf_get_tensor_name(ctx_src, i);
        ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str());

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;

        bool quantize = false;
        for (const auto & s : k_names) {
            if (std::regex_match(name, std::regex(s))) {
                quantize = true;
                break;
            }
        }

        // quantize only 2D tensors that are bigger than the block size
        quantize &= (ggml_n_dims(cur) == 2) && cur->ne[0] > ggml_blck_size(type);

        if (quantize) {
            new_type = type;
            if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) {
                new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type
                // LOG_ERR("%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type));
            }
            const size_t n_elms = ggml_nelements(cur);
            float * f32_data;

            switch (cur->type) {
                case GGML_TYPE_F32:
                    f32_data = (float *)cur->data;
                    break;
                case GGML_TYPE_F16:
                    if (conv_buf.size() < n_elms) {
                        conv_buf.resize(n_elms);
                    }
                    for (size_t j = 0; j < n_elms; ++j) {
                        conv_buf[j] = ggml_fp16_to_fp32(((ggml_fp16_t *)cur->data)[j]);
                    }
                    f32_data = (float *)conv_buf.data();
                    break;
                default:
                    LOG_ERR("%s: Please use an input file in f32 or f16\n", __func__);
                    gguf_free(ctx_out);
                    return false;
            }

            if (work.size() < n_elms * 4) {
                work.resize(n_elms * 4);
            }
            new_data = work.data();
            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms / cur->ne[0], cur->ne[0], nullptr);
        } else {
            new_type = cur->type;
            new_data = cur->data;
            new_size = ggml_nbytes(cur);
        }

        const size_t orig_size = ggml_nbytes(cur);
        total_size_org += orig_size;
        total_size_new += new_size;

        gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
        GGML_ASSERT(gguf_get_tensor_size(ctx_out, gguf_find_tensor(ctx_out, name.c_str())) == new_size);
        gguf_set_tensor_data(ctx_out, name.c_str(), new_data);
        fout.write((const char *)new_data, new_size);

        // pad the tensor data to the GGUF alignment
        size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
        for (size_t j = 0; j < pad; ++j) {
            fout.put(0);
        }

        LOG_INF("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
                orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
    }

    // go back to the beginning of the file and write the updated metadata
    fout.seekp(0, std::ios::beg);
    std::vector<uint8_t> meta(meta_size);
    gguf_get_meta_data(ctx_out, meta.data());
    fout.write((const char *)meta.data(), meta_size);

    fout.close();

    clip_free(ctx_clip);
    gguf_free(ctx_out);

    {
        LOG_INF("%s: original  size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
        LOG_INF("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
    }
    return true;
}

int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    switch (ctx->proj_type) {
        case PROJECTOR_TYPE_LDP:
            return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
        case PROJECTOR_TYPE_LDPV2:
            return ctx->vision_model.mm_model_peg_0_b->ne[0];
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_PIXTRAL:
            return ctx->vision_model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_MLP_NORM:
            return ctx->vision_model.mm_3_b->ne[0];
        case PROJECTOR_TYPE_MINICPMV:
            if (ctx->minicpmv_version == 2) {
                return 4096;
            } else if (ctx->minicpmv_version == 3) {
                return 3584;
            } else if (ctx->minicpmv_version == 4) {
                return 3584;
            }
            GGML_ABORT("Unknown minicpmv version");
        case PROJECTOR_TYPE_GLM_EDGE:
            return ctx->vision_model.mm_model_mlp_3_w->ne[1];
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            return ctx->vision_model.mm_1_b->ne[0];
        case PROJECTOR_TYPE_GEMMA3:
            return ctx->vision_model.mm_input_proj_w->ne[0];
        case PROJECTOR_TYPE_IDEFICS3:
            return ctx->vision_model.projection->ne[1];
        case PROJECTOR_TYPE_INTERNVL:
            return ctx->vision_model.mm_3_w->ne[1];
        default:
            GGML_ABORT("Unknown projector type");
    }
}

int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
        return ctx->minicpmv_version;
    }
    return 0;
}

bool clip_is_glm(const struct clip_ctx * ctx) {
    return ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE;
}

bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
    return ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL;
}

bool clip_is_llava(const struct clip_ctx * ctx) {
    return ctx->has_llava_projector;
}

bool clip_is_gemma3(const struct clip_ctx * ctx) {
    return ctx->proj_type == PROJECTOR_TYPE_GEMMA3;
}

bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
    clip_image_f32 clip_img;
    clip_img.buf.resize(h * w * 3);

    for (int i = 0; i < h * w * 3; i++) {
        clip_img.buf[i] = img[i];
    }

    clip_img.nx = w;
    clip_img.ny = h;

    clip_image_encode(ctx, n_threads, &clip_img, vec);

    return true;
}

//
// API used internally with mtmd
//

projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
    return ctx->proj_type;
}