// NOTE: This is modified from clip.cpp only for LLaVA,
// so there might still be unnecessary artifacts hanging around
// I'll gradually clean and extend it
// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to PyTorch
#include "clip.h"
#include "clip-impl.h"
#include "ggml.h"
#include "ggml-cpp.h"
#include "ggml-cpu.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "gguf.h"

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <regex>
#include <stdexcept>
#include <unordered_set>
#include <vector>
#include <sstream>
#include <cinttypes>
#include <limits>
#include <array>
#include <numeric>
#include <functional>
struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};

enum ffn_op_type {
    FFN_GELU,
    FFN_GELU_ERF,
    FFN_SILU,
    FFN_GELU_QUICK,
};

enum norm_type {
    NORM_TYPE_NORMAL,
    NORM_TYPE_RMS,
};
//#define CLIP_DEBUG_FUNCTIONS

#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    // Write pixel data
    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
    }

    file.close();
}
static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;
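    // BMP rows are padded to a 4-byte boundary, e.g. a 3-pixel-wide RGB row is
    // 9 bytes of pixel data plus 3 bytes of padding for a 12-byte stride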

    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',     // Signature
        0,0,0,0,     // Image file size in bytes
        0,0,0,0,     // Reserved
        54,0,0,0     // Start of pixel array
    };

    // Total file size
    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0,    // Size of this header (40 bytes)
        0,0,0,0,     // Image width
        0,0,0,0,     // Image height
        1,0,         // Number of color planes
        24,0,        // Bits per pixel
        0,0,0,0,     // No compression
        0,0,0,0,     // Image size (can be 0 for no compression)
        0,0,0,0,     // X pixels per meter (not specified)
        0,0,0,0,     // Y pixels per meter (not specified)
        0,0,0,0,     // Total colors (color table not used)
        0,0,0,0      // Important colors (all are important)
    };

    // Width and height in the information header
    infoHeader[4]  = (unsigned char)(img.nx);
    infoHeader[5]  = (unsigned char)(img.nx >> 8);
    infoHeader[6]  = (unsigned char)(img.nx >> 16);
    infoHeader[7]  = (unsigned char)(img.nx >> 24);
    infoHeader[8]  = (unsigned char)(img.ny);
    infoHeader[9]  = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));

    // Pixel data
    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            // Each pixel
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
                img.buf[pixelIndex]
            };
            file.write(reinterpret_cast<char*>(pixel), 3);
        }
        // Write padding for the row
        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
    }

    file.close();
}
// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(3 * src.nx * src.ny);
    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
    }
}
#endif
//
// clip layers
//

enum patch_merge_type {
    PATCH_MERGE_FLAT,
    PATCH_MERGE_SPATIAL_UNPAD,
};
struct clip_hparams {
    int32_t image_size;
    int32_t patch_size;
    int32_t n_embd;
    int32_t n_ff;
    int32_t projection_dim;
    int32_t n_head;
    int32_t n_layer;
    int32_t proj_scale_factor = 0; // idefics3

    float image_mean[3];
    float image_std[3];

    // for models using dynamic image size, we need to have a smaller image size to warmup
    // otherwise, the user will get OOM every time they load the model
    int32_t warmup_image_size = 0;
    int32_t warmup_audio_size = 3000;

    ffn_op_type ffn_op = FFN_GELU;

    patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;

    float eps        = 1e-6;
    float rope_theta = 0.0;

    std::vector<clip_image_size> image_res_candidates; // for llava-uhd style models
    int32_t image_crop_resolution;
    std::unordered_set<int32_t> vision_feature_layer;
    int32_t attn_window_size   = 0;
    int32_t n_wa_pattern       = 0;
    int32_t spatial_merge_size = 0;

    // audio
    int32_t n_mel_bins        = 0; // whisper preprocessor
    int32_t proj_stack_factor = 0; // ultravox

    // legacy
    bool has_llava_projector = false;
    int minicpmv_version     = 0;
};
struct clip_layer {
    // attention
    ggml_tensor * k_w = nullptr;
    ggml_tensor * k_b = nullptr;
    ggml_tensor * q_w = nullptr;
    ggml_tensor * q_b = nullptr;
    ggml_tensor * v_w = nullptr;
    ggml_tensor * v_b = nullptr;

    ggml_tensor * o_w = nullptr;
    ggml_tensor * o_b = nullptr;

    ggml_tensor * k_norm = nullptr;
    ggml_tensor * q_norm = nullptr;

    // layernorm 1
    ggml_tensor * ln_1_w = nullptr;
    ggml_tensor * ln_1_b = nullptr;

    ggml_tensor * ff_up_w   = nullptr;
    ggml_tensor * ff_up_b   = nullptr;
    ggml_tensor * ff_gate_w = nullptr;
    ggml_tensor * ff_gate_b = nullptr;
    ggml_tensor * ff_down_w = nullptr;
    ggml_tensor * ff_down_b = nullptr;

    // layernorm 2
    ggml_tensor * ln_2_w = nullptr;
    ggml_tensor * ln_2_b = nullptr;

    // layer scale (no bias)
    ggml_tensor * ls_1_w = nullptr;
    ggml_tensor * ls_2_w = nullptr;
};
struct clip_model {
    clip_modality modality = CLIP_MODALITY_VISION;
    projector_type proj_type = PROJECTOR_TYPE_MLP;
    clip_hparams hparams;

    // embeddings
    ggml_tensor * class_embedding    = nullptr;
    ggml_tensor * patch_embeddings_0 = nullptr;
    ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along temporal dimension (Qwen2VL)
    ggml_tensor * patch_bias          = nullptr;
    ggml_tensor * position_embeddings = nullptr;

    ggml_tensor * pre_ln_w = nullptr;
    ggml_tensor * pre_ln_b = nullptr;

    std::vector<clip_layer> layers;

    ggml_tensor * post_ln_w;
    ggml_tensor * post_ln_b;

    ggml_tensor * projection; // TODO: rename it to fc (fully connected layer)
    ggml_tensor * mm_fc_w;
    ggml_tensor * mm_fc_b;

    // LLaVA projection
    ggml_tensor * mm_input_norm_w = nullptr;
    ggml_tensor * mm_0_w = nullptr;
    ggml_tensor * mm_0_b = nullptr;
    ggml_tensor * mm_2_w = nullptr;
    ggml_tensor * mm_2_b = nullptr;

    ggml_tensor * image_newline = nullptr;

    // Yi type models with mlp+normalization projection
    ggml_tensor * mm_1_w = nullptr; // Yi type models have 0, 1, 3, 4
    ggml_tensor * mm_1_b = nullptr;
    ggml_tensor * mm_3_w = nullptr;
    ggml_tensor * mm_3_b = nullptr;
    ggml_tensor * mm_4_w = nullptr;
    ggml_tensor * mm_4_b = nullptr;

    // GLMV-Edge projection
    ggml_tensor * mm_model_adapter_conv_w = nullptr;
    ggml_tensor * mm_model_adapter_conv_b = nullptr;
    ggml_tensor * mm_glm_tok_boi = nullptr;
    ggml_tensor * mm_glm_tok_eoi = nullptr;

    // MobileVLM projection
    ggml_tensor * mm_model_mlp_1_w = nullptr;
    ggml_tensor * mm_model_mlp_1_b = nullptr;
    ggml_tensor * mm_model_mlp_3_w = nullptr;
    ggml_tensor * mm_model_mlp_3_b = nullptr;
    ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;

    // MobileVLM_V2 projection
    ggml_tensor * mm_model_mlp_0_w = nullptr;
    ggml_tensor * mm_model_mlp_0_b = nullptr;
    ggml_tensor * mm_model_mlp_2_w = nullptr;
    ggml_tensor * mm_model_mlp_2_b = nullptr;
    ggml_tensor * mm_model_peg_0_w = nullptr;
    ggml_tensor * mm_model_peg_0_b = nullptr;

    // MINICPMV projection
    ggml_tensor * mm_model_pos_embed_k = nullptr;
    ggml_tensor * mm_model_query = nullptr;
    ggml_tensor * mm_model_proj = nullptr;
    ggml_tensor * mm_model_kv_proj = nullptr;
    ggml_tensor * mm_model_attn_q_w = nullptr;
    ggml_tensor * mm_model_attn_q_b = nullptr;
    ggml_tensor * mm_model_attn_k_w = nullptr;
    ggml_tensor * mm_model_attn_k_b = nullptr;
    ggml_tensor * mm_model_attn_v_w = nullptr;
    ggml_tensor * mm_model_attn_v_b = nullptr;
    ggml_tensor * mm_model_attn_o_w = nullptr;
    ggml_tensor * mm_model_attn_o_b = nullptr;
    ggml_tensor * mm_model_ln_q_w = nullptr;
    ggml_tensor * mm_model_ln_q_b = nullptr;
    ggml_tensor * mm_model_ln_kv_w = nullptr;
    ggml_tensor * mm_model_ln_kv_b = nullptr;
    ggml_tensor * mm_model_ln_post_w = nullptr;
    ggml_tensor * mm_model_ln_post_b = nullptr;

    // gemma3
    ggml_tensor * mm_input_proj_w = nullptr;
    ggml_tensor * mm_soft_emb_norm_w = nullptr;

    // pixtral
    ggml_tensor * token_embd_img_break = nullptr;
    ggml_tensor * mm_patch_merger_w = nullptr;

    // ultravox / whisper encoder
    ggml_tensor * conv1d_1_w = nullptr;
    ggml_tensor * conv1d_1_b = nullptr;
    ggml_tensor * conv1d_2_w = nullptr;
    ggml_tensor * conv1d_2_b = nullptr;
    ggml_tensor * mm_norm_pre_w = nullptr;
    ggml_tensor * mm_norm_mid_w = nullptr;
};
struct clip_ctx {
    clip_model model;

    gguf_context_ptr ctx_gguf;
    ggml_context_ptr ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    std::vector<ggml_backend_t> backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    ggml_backend_t backend;
    ggml_backend_t backend_cpu;
    ggml_backend_buffer_ptr buf;

    int max_nodes = 8192;
    ggml_backend_sched_ptr sched;

    // for debugging
    bool debug_graph = false;
    std::vector<ggml_tensor *> debug_print_tensors;

    clip_ctx(clip_context_params & ctx_params) {
        debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr;
        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (!backend_cpu) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        backend = ctx_params.use_gpu
                    ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr)
                    : nullptr;

        if (backend) {
            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
            backend_ptrs.push_back(backend);
            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
        } else {
            backend = backend_cpu;
            LOG_INF("%s: CLIP using CPU backend\n", __func__);
        }

        backend_ptrs.push_back(backend_cpu);
        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));

        sched.reset(
            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
        );
    }

    ~clip_ctx() {
        ggml_backend_free(backend);
        if (backend != backend_cpu) {
            ggml_backend_free(backend_cpu);
        }
    }

    // this function is added so that we don't change too much of the existing code
    projector_type proj_type() const {
        return model.proj_type;
    }
};
struct clip_graph {
    clip_ctx * ctx;
    const clip_model & model;
    const clip_hparams & hparams;

    // we only support a single image per batch
    const clip_image_f32 & img;
    const int patch_size;
    const int n_patches_x;
    const int n_patches_y;
    const int n_patches;
    const int n_embd;
    const int n_head;
    const int d_head;
    const int n_layer;
    const float eps;
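    // standard scaled-dot-product attention factor, 1/sqrt(d_head)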
    const float kq_scale;

    ggml_context_ptr ctx0_ptr;
    ggml_context * ctx0;
    ggml_cgraph * gf;

    clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
            ctx(ctx),
            model(ctx->model),
            hparams(model.hparams),
            img(img),
            patch_size(hparams.patch_size),
            n_patches_x(img.nx / patch_size),
            n_patches_y(img.ny / patch_size),
            n_patches(n_patches_x * n_patches_y),
            n_embd(hparams.n_embd),
            n_head(hparams.n_head),
            d_head(n_embd / n_head),
            n_layer(hparams.n_layer),
            eps(hparams.eps),
            kq_scale(1.0f / sqrtf((float)d_head)) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx->buf_compute_meta.size(),
            /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
            /*.no_alloc   =*/ true,
        };
        ctx0_ptr.reset(ggml_init(params));
        ctx0 = ctx0_ptr.get();
        gf = ggml_new_graph_custom(ctx0, ctx->max_nodes, false);
    }
    ggml_cgraph * build_siglip() {
        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                                inp, n_patches,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                model.position_embeddings,
                                nullptr);

        if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) {
            const int batch_size = 1;

            GGML_ASSERT(n_patches_x == n_patches_y);
            const int patches_per_image = n_patches_x;
            const int kernel_size = hparams.proj_scale_factor;

            cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
            cur = ggml_reshape_4d(ctx0, cur, patches_per_image, patches_per_image, n_embd, batch_size);

            // doing a pool2d to reduce the number of output tokens
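            // e.g. with a 64x64 patch grid and kernel_size = 4, the 4096 patch
            // embeddings are average-pooled down to a 16x16 = 256 token grid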
            cur = ggml_pool_2d(ctx0, cur, GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0);
            cur = ggml_reshape_3d(ctx0, cur, cur->ne[0] * cur->ne[0], n_embd, batch_size);
            cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));

            // apply norm before projection
            cur = ggml_rms_norm(ctx0, cur, eps);
            cur = ggml_mul(ctx0, cur, model.mm_soft_emb_norm_w);

            // apply projection
            cur = ggml_mul_mat(ctx0,
                ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)),
                cur);

        } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3) {
            // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578

            const int scale_factor = model.hparams.proj_scale_factor;
            const int n_embd = cur->ne[0];
            const int seq    = cur->ne[1];
            const int bsz    = 1; // batch size, always 1 for now since we don't support batching
            const int height = std::sqrt(seq);
            const int width  = std::sqrt(seq);
            GGML_ASSERT(scale_factor != 0);
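            // pixel shuffle: trade spatial resolution for channel depth,
            // [n_embd, seq] -> [n_embd * s^2, seq / s^2], so the token count
            // shrinks by s^2 while each embedding grows s^2 times wider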
            cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height, bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                height / scale_factor,
                width / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_3d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                seq / (scale_factor * scale_factor),
                bsz);

            cur = ggml_mul_mat(ctx0, model.projection, cur);
        } else {
            GGML_ABORT("SigLIP: Unsupported projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    ggml_cgraph * build_pixtral() {
        const int n_merge = hparams.spatial_merge_size;

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            return build_rope_2d(ctx0, cur, pos_h, pos_w, hparams.rope_theta, true);
        };
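        // 2D RoPE: half of each head's rotary dimensions encode the row
        // position and the other half the column position (see build_rope_2d)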
        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                                inp, n_patches,
                                NORM_TYPE_RMS,
                                hparams.ffn_op,
                                nullptr, // no learned pos embd
                                add_pos);

        // mistral small 3.1 patch merger
        // ref: https://github.com/huggingface/transformers/blob/7a3e208892c06a5e278144eaf38c8599a42f53e7/src/transformers/models/mistral3/modeling_mistral3.py#L67
        if (model.mm_patch_merger_w) {
            GGML_ASSERT(hparams.spatial_merge_size > 0);

            cur = ggml_mul(ctx0, ggml_rms_norm(ctx0, cur, eps), model.mm_input_norm_w);

            // reshape image tokens to 2D grid
            cur = ggml_reshape_3d(ctx0, cur, n_embd, n_patches_x, n_patches_y);
            cur = ggml_permute(ctx0, cur, 2, 0, 1, 3); // [x, y, n_embd]
            cur = ggml_cont(ctx0, cur);

            // torch.nn.functional.unfold is just an im2col under the hood
            // we just need a dummy kernel to make it work
            ggml_tensor * kernel = ggml_view_3d(ctx0, cur, n_merge, n_merge, cur->ne[2], 0, 0, 0);
            cur = ggml_im2col(ctx0, kernel, cur, n_merge, n_merge, 0, 0, 1, 1, true, inp->type);

            // project to n_embd
            cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
            cur = ggml_mul_mat(ctx0, model.mm_patch_merger_w, cur);
        }

        // LlavaMultiModalProjector (always using GELU activation)
        {
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            if (model.mm_1_b) {
                cur = ggml_add(ctx0, cur, model.mm_1_b);
            }

            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            if (model.mm_2_b) {
                cur = ggml_add(ctx0, cur, model.mm_2_b);
            }
        }

        // arrangement of the [IMG_BREAK] token
        {
            // not efficient, but works
            // the trick is to view the embeddings as a 3D tensor with shape [n_embd, n_patches_per_row, n_rows]
            // and then concatenate the [IMG_BREAK] token to the end of each row, aka n_patches_per_row dimension
            // after the concatenation, we have a tensor with shape [n_embd, n_patches_per_row + 1, n_rows]
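            // e.g. a 2x3 grid of patch embeddings E becomes
            //   E00 E01 E02 [IMG_BREAK] E10 E11 E12
            // (the [IMG_BREAK] after the last row is cropped off by the 2D view below)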
            const int p_y             = n_merge > 0 ? n_patches_y / n_merge : n_patches_y;
            const int p_x             = n_merge > 0 ? n_patches_x / n_merge : n_patches_x;
            const int p_total         = p_x * p_y;
            const int n_embd_text     = cur->ne[0];
            const int n_tokens_output = p_total + p_y - 1; // one [IMG_BREAK] per row, except the last row

            ggml_tensor * tmp = ggml_reshape_3d(ctx0, cur, n_embd_text, p_x, p_y);
            ggml_tensor * tok = ggml_new_tensor_3d(ctx0, tmp->type, n_embd_text, 1, p_y);
            tok = ggml_scale(ctx0, tok, 0.0); // clear the tensor
            tok = ggml_add(ctx0, tok, model.token_embd_img_break);
            tmp = ggml_concat(ctx0, tmp, tok, 1);
            cur = ggml_view_2d(ctx0, tmp,
                n_embd_text, n_tokens_output,
                ggml_row_size(tmp->type, n_embd_text), 0);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    // Qwen2VL and Qwen2.5VL use M-RoPE
    ggml_cgraph * build_qwen2vl() {
        GGML_ASSERT(model.patch_bias == nullptr);
        GGML_ASSERT(model.class_embedding == nullptr);

        const int batch_size       = 1;
        const bool use_window_attn = hparams.n_wa_pattern > 0;
        const int n_wa_pattern     = hparams.n_wa_pattern;
        const int n_pos            = n_patches;
        const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position

        norm_type norm_t = ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL
            ? NORM_TYPE_RMS     // qwen 2.5 vl
            : NORM_TYPE_NORMAL; // qwen 2 vl

        int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
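        // M-RoPE splits each head's rotary dimensions into 4 equal sections,
        // one per position component, matching the 4 values per patch carried
        // by the `positions` input tensor below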
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

        GGML_ASSERT(img.nx % (patch_size * 2) == 0);
        GGML_ASSERT(img.ny % (patch_size * 2) == 0);

        // second conv dimension
        {
            auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
            inp = ggml_add(ctx0, inp, inp_1);

            inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 2, 0, 3)); // [w, h, c, b] -> [c, w, h, b]
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
            inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 0, 2, 1, 3));
            inp = ggml_reshape_3d(
                ctx0, inp,
                n_embd, n_patches_x * n_patches_y, batch_size);
        }
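        // net effect of the reshapes above: the token order is rearranged so
        // that every 4 consecutive embeddings form a 2x2 spatial block of
        // patches, which the mm projector later merges (n_embd * 4 -> proj dim)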
        ggml_tensor * inpL           = inp;
        ggml_tensor * window_mask    = nullptr;
        ggml_tensor * window_idx     = nullptr;
        ggml_tensor * inv_window_idx = nullptr;

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
        }

        if (use_window_attn) {
            // handle window attention inputs
            inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(inv_window_idx, "inv_window_idx");
            ggml_set_input(inv_window_idx);
            // mask for window attention
            window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_pos, n_pos);
            ggml_set_name(window_mask, "window_mask");
            ggml_set_input(window_mask);

            // inpL shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            inpL = ggml_reshape_2d(ctx0, inpL, n_embd * 4, n_patches_x * n_patches_y * batch_size / 4);
            inpL = ggml_get_rows(ctx0, inpL, inv_window_idx);
            inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_patches_x * n_patches_y, batch_size);
        }
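        // after this reordering, tokens belonging to the same attention window
        // are contiguous, so `window_mask` can restrict attention in the
        // layers that do not use full attention (see full_attn below)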
        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            const bool full_attn = use_window_attn ? (il + 1) % n_wa_pattern == 0 : true;

            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "ln1", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.q_w, cur), layer.q_b);
                ggml_tensor * Kcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.k_w, cur), layer.k_b);
                ggml_tensor * Vcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.v_w, cur), layer.v_b);

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_patches);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_patches);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_patches);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                // apply M-RoPE
                Qcur = ggml_rope_multi(
                    ctx0, Qcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
                Kcur = ggml_rope_multi(
                    ctx0, Kcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);

                cb(Qcur, "Qcur_rope", il);
                cb(Kcur, "Kcur_rope", il);

                ggml_tensor * attn_mask = full_attn ? nullptr : window_mask;

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, attn_mask, kq_scale, il);
                cb(cur, "attn_out", il);
            }
            // re-add the layer input, i.e., the residual connection
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states
            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);
            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
        }

        // multimodal projection
        ggml_tensor * embeddings = inpL;
        embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);

        embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

        // GELU activation
        embeddings = ggml_gelu(ctx0, embeddings);

        // Second linear layer
        embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);

        if (use_window_attn) {
            window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(window_idx, "window_idx");
            ggml_set_input(window_idx);

            // embeddings shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4);
            embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
            embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4, batch_size);
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_minicpmv() {
        const int batch_size = 1;

        GGML_ASSERT(model.class_embedding == nullptr);
        const int n_pos = n_patches;

        // position embeddings for the projector (not for ViT)
        int n_output_dim = clip_n_mmproj_embd(ctx);
        ggml_tensor * pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_output_dim, n_pos, batch_size);
        ggml_set_name(pos_embed, "pos_embed");
        ggml_set_input(pos_embed);

        // for selecting learned pos embd, used by ViT
        struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        ggml_tensor * learned_pos_embd = ggml_get_rows(ctx0, model.position_embeddings, positions);

        ggml_tensor * inp = build_inp();
        ggml_tensor * embeddings = build_vit(
                                inp, n_patches,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                learned_pos_embd,
                                nullptr);

        // resampler projector (it is just another transformer)

        ggml_tensor * q = model.mm_model_query;
        ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);

        // norm
        q = build_norm(q, model.mm_model_ln_q_w, model.mm_model_ln_q_b, NORM_TYPE_NORMAL, eps, -1);
        v = build_norm(v, model.mm_model_ln_kv_w, model.mm_model_ln_kv_b, NORM_TYPE_NORMAL, eps, -1);

        // k = v + pos_embed
        ggml_tensor * k = ggml_add(ctx0, v, pos_embed);

        // attention
        {
            int n_embd = clip_n_mmproj_embd(ctx);
            const int d_head = 128;
            int n_head = n_embd/d_head;
            int num_query = 96;
            if (ctx->model.hparams.minicpmv_version == 2) {
                num_query = 96;
            } else if (ctx->model.hparams.minicpmv_version == 3) {
                num_query = 64;
            } else if (ctx->model.hparams.minicpmv_version == 4) {
                num_query = 64;
            }
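            // the resampler compresses the variable-length ViT output into a
            // fixed number of tokens (96 or 64 depending on the model version)
            // via cross-attention: Q comes from learned queries, K/V from the image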
            ggml_tensor * Q = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q),
                model.mm_model_attn_q_b);
            ggml_tensor * K = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k),
                model.mm_model_attn_k_b);
            ggml_tensor * V = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v),
                model.mm_model_attn_v_b);

            Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_query);
            K = ggml_reshape_3d(ctx0, K, d_head, n_head, n_pos);
            V = ggml_reshape_3d(ctx0, V, d_head, n_head, n_pos);

            cb(Q, "resampler_Q", -1);
            cb(K, "resampler_K", -1);
            cb(V, "resampler_V", -1);

            embeddings = build_attn(
                model.mm_model_attn_o_w,
                model.mm_model_attn_o_b,
                Q, K, V, nullptr, kq_scale, -1);
            cb(embeddings, "resampler_attn_out", -1);
        }

        // layernorm
        embeddings = build_norm(embeddings, model.mm_model_ln_post_w, model.mm_model_ln_post_b, NORM_TYPE_NORMAL, eps, -1);

        // projection
        embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_internvl() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1;
        ggml_tensor * inp = build_inp();

        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // The larger models use a different ViT, which uses RMS norm instead of layer norm
        // ref: https://github.com/ggml-org/llama.cpp/pull/13443#issuecomment-2869786188
        norm_type norm_t = (hparams.n_embd == 3200 && hparams.n_layer == 45)
            ? NORM_TYPE_RMS     // 6B ViT (Used by InternVL 2.5/3 - 26B, 38B, 78B)
            : NORM_TYPE_NORMAL; // 300M ViT (Used by all smaller InternVL models)

        ggml_tensor * cur = build_vit(
                                inp, n_pos,
                                norm_t,
                                hparams.ffn_op,
                                model.position_embeddings,
                                nullptr);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
        {
            const int scale_factor = model.hparams.proj_scale_factor;
            const int bsz    = 1; // batch size, always 1 for now since we don't support batching
            const int height = n_patches_y;
            const int width  = n_patches_x;
            GGML_ASSERT(scale_factor > 0);
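            // same pixel shuffle as in the idefics3 path above: s^2 fewer
            // tokens, each s^2 times wider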
            cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, height / scale_factor, width, bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                height / scale_factor,
                width / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

            // flatten to 2D
            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                cur->ne[1] * cur->ne[2]);
        }

        // projector (always using GELU activation)
        {
            // projector LayerNorm uses pytorch's default eps = 1e-5
            // ref: https://huggingface.co/OpenGVLab/InternVL3-8B-Instruct/blob/a34d3e4e129a5856abfd6aa6de79776484caa14e/modeling_internvl_chat.py#L79
            cur = build_norm(cur, model.mm_0_w, model.mm_0_b, NORM_TYPE_NORMAL, 1e-5, -1);
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_1_b);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_3_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_3_b);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    ggml_cgraph * build_llama4() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1; // +1 for [CLS]

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        ggml_tensor * inp = build_inp_raw();

        // Llama4UnfoldConvolution
        {
            ggml_tensor * kernel = ggml_reshape_4d(ctx0, model.patch_embeddings_0,
                                                patch_size, patch_size, 3, n_embd);
            inp = ggml_im2col(ctx0, kernel, inp, patch_size, patch_size, 0, 0, 1, 1, true, inp->type);
            inp = ggml_mul_mat(ctx0, model.patch_embeddings_0, inp);
            inp = ggml_reshape_2d(ctx0, inp, n_embd, n_patches);
            cb(inp, "patch_conv", -1);
        }
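        // the im2col + mat_mul above is equivalent to a non-overlapping Conv2D
        // patch embedding: each patch_size x patch_size x 3 patch is flattened
        // and linearly projected to n_embd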
        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // build ViT with 2D position embeddings
        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            // first half is X axis and second half is Y axis
            // ref: https://github.com/huggingface/transformers/blob/40a493c7ed4f19f08eadb0639cf26d49bfa5e180/src/transformers/models/llama4/modeling_llama4.py#L1312
            // ref: https://github.com/Blaizzy/mlx-vlm/blob/a57156aa87b33cca6e5ee6cfc14dd4ef8f611be6/mlx_vlm/models/llama4/vision.py#L441
            return build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
        };

        ggml_tensor * cur = build_vit(
                                inp, n_pos,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                model.position_embeddings,
                                add_pos);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
        // based on Llama4VisionPixelShuffleMLP
        // https://github.com/huggingface/transformers/blob/2932f318a20d9e54cc7aea052e040164d85de7d6/src/transformers/models/llama4/modeling_llama4.py#L1151
        {
            const int scale_factor = model.hparams.proj_scale_factor;
            const int bsz = 1; // batch size, always 1 for now since we don't support batching
            GGML_ASSERT(scale_factor > 0);
            GGML_ASSERT(n_patches_x == n_patches_y); // llama4 only supports square images
            cur = ggml_reshape_4d(ctx0, cur,
                n_embd * scale_factor,
                n_patches_x / scale_factor,
                n_patches_y,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                n_patches_x / scale_factor,
                n_patches_y / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

            // flatten to 2D
            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                n_patches / scale_factor / scale_factor);
            cb(cur, "pixel_shuffle", -1);
        }

        // based on Llama4VisionMLP2 (always uses GELU activation, no bias)
        {
            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, cur);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, cur);
            cur = ggml_gelu(ctx0, cur);
            cb(cur, "adapter_mlp", -1);
        }

        // Llama4MultiModalProjector
        cur = ggml_mul_mat(ctx0, model.mm_model_proj, cur);
        cb(cur, "projected", -1);

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    // this graph is used by llava, granite and glm
    // due to having embedding_stack (used by granite), we cannot reuse build_vit
    ggml_cgraph * build_llava() {
        const int batch_size = 1;
        const int n_pos = n_patches + (model.class_embedding ? 1 : 0);

        GGML_ASSERT(n_patches_x == n_patches_y && "only square images supported");

        // Calculate the deepest feature layer based on hparams and projector type
        int max_feature_layer = n_layer;
        {
            // Get the index of the second to last layer; this is the default for models that have a llava projector
            int il_last = hparams.n_layer - 1;
            int deepest_feature_layer = -1;

            if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV || ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
                il_last += 1;
            }

            // If we set explicit vision feature layers, only go up to the deepest one
            // NOTE: only used by granite-vision models for now
            for (const auto & feature_layer : hparams.vision_feature_layer) {
                if (feature_layer > deepest_feature_layer) {
                    deepest_feature_layer = feature_layer;
                }
            }
            max_feature_layer = deepest_feature_layer < 0 ? il_last : deepest_feature_layer;
        }
        ggml_tensor * inp = build_inp();

        // concat class_embeddings and patch_embeddings
        if (model.class_embedding) {
            inp = ggml_concat(ctx0, inp, model.class_embedding, 1);
        }

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        inp = ggml_add(ctx0, inp, ggml_get_rows(ctx0, model.position_embeddings, positions));

        ggml_tensor * inpL = inp;

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, NORM_TYPE_NORMAL, eps, -1);
            cb(inpL, "pre_ln", -1);
        }

        std::vector<ggml_tensor *> embedding_stack;
        const auto & vision_feature_layer = hparams.vision_feature_layer;

        // loop over layers
        for (int il = 0; il < max_feature_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // If this is an embedding feature layer, save the output.
            // NOTE: 0 index here refers to the input to the encoder.
            if (vision_feature_layer.find(il) != vision_feature_layer.end()) {
                embedding_stack.push_back(cur);
            }

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "layer_inp_normed", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }

                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }

                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }
  951. // re-add the layer input, e.g., residual
  952. cur = ggml_add(ctx0, cur, inpL);
  953. inpL = cur; // inpL = residual, cur = hidden_states
  954. cb(cur, "ffn_inp", il);
  955. // layernorm2
  956. cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, eps, il);
  957. cb(cur, "ffn_inp_normed", il);
  958. // ffn
  959. cur = build_ffn(cur,
  960. layer.ff_up_w, layer.ff_up_b,
  961. layer.ff_gate_w, layer.ff_gate_b,
  962. layer.ff_down_w, layer.ff_down_b,
  963. hparams.ffn_op, il);
  964. cb(cur, "ffn_out", il);
  965. // residual 2
  966. cur = ggml_add(ctx0, inpL, cur);
  967. cb(cur, "layer_out", il);
  968. inpL = cur;
  969. }
  970. // post-layernorm
  971. if (model.post_ln_w) {
  972. inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, NORM_TYPE_NORMAL, eps, -1);
  973. }
  974. ggml_tensor * embeddings = inpL;
  975. // process vision feature layers (used by granite)
  976. {
  977. // final layer is a vision feature layer
  978. if (vision_feature_layer.find(max_feature_layer) != vision_feature_layer.end()) {
  979. embedding_stack.push_back(inpL);
  980. }
  981. // If feature layers are explicitly set, stack them (if we have multiple)
  982. if (!embedding_stack.empty()) {
  983. embeddings = embedding_stack[0];
  984. for (size_t i = 1; i < embedding_stack.size(); i++) {
  985. embeddings = ggml_concat(ctx0, embeddings, embedding_stack[i], 0);
  986. }
  987. }
  988. }
  989. // llava projector (also used by granite)
  990. if (ctx->model.hparams.has_llava_projector) {
  991. embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
  992. ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
  993. ggml_set_name(patches, "patches");
  994. ggml_set_input(patches);
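            // expected to hold the indices of the patch tokens, so the ggml_get_rows below keeps only the patch rows (e.g. dropping the [CLS] embedding)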
            // shape [1, 576, 1024]
            // ne is whcn, ne = [1024, 576, 1, 1]
            embeddings = ggml_get_rows(ctx0, embeddings, patches);

            // print_tensor_info(embeddings, "embeddings");

            // llava projector
            if (ctx->proj_type() == PROJECTOR_TYPE_MLP) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

                embeddings = ggml_gelu(ctx0, embeddings);
                if (model.mm_2_w) {
                    embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
                    embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
                }
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_MLP_NORM) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
                // ggml_tensor_printf(embeddings, "mm_0_w", 0, true, false);

                // First LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),
                    model.mm_1_b);

                // GELU activation
                embeddings = ggml_gelu(ctx0, embeddings);

                // Second linear layer
                embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);

                // Second LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
                    model.mm_4_b);
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_LDP) {
                // MobileVLM projector
                int n_patch = 24;

                ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
                mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
                mlp_1 = ggml_gelu(ctx0, mlp_1);
                ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
                mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
                // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]

                // block 1
                ggml_tensor * block_1 = nullptr;
                {
                    // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
                    mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
                    mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
                    // stride = 1, padding = 1, bias is nullptr
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);

                    // layer norm
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));

                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
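                    // squeeze-and-excitation style gate (cf. MobileNetV3): global average pool,
                    // two FC layers with relu/hardsigmoid, then a channel-wise scale of block_1_hw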
                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]

                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);

                    // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));

                    // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);

                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]

                    // residual
                    block_1 = ggml_add(ctx0, mlp_3, block_1);
                }

                // block_2
                {
                    // stride = 2
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]

                    // layer norm
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]

                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
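                    // same squeeze-and-excitation style gate as in block 1, applied to the stride-2 feature map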
                    // not sure the parameters are right for global average pooling
                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]

                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);

                    // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
                    // block_1 shape = [1, 12*12, 2048], ne = [12*12, 2048, 1]
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
                    block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
                    // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
                }
                embeddings = block_1;
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_LDPV2) {
                int n_patch = 24;
                ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
                mlp_0 = ggml_gelu(ctx0, mlp_0);
                ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
                mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
                // mlp_2 ne = [2048, 576, 1, 1]

                // AVG Pool Layer 2*2, strides = 2
                mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
                // mlp_2 ne = [576, 2048, 1, 1]
                mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
                // mlp_2 ne = [24, 24, 2048, 1]
                mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);

                // weight ne = [3, 3, 2048, 1]
                ggml_tensor * peg_0 = ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
                peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
                mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, mlp_2);
                peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
                embeddings = peg_0;
            }
            else {
                GGML_ABORT("fatal error");
            }
        }
        // glm projector
        else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
            size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
            embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
            embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
            embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
            embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0]*embeddings->ne[1], embeddings->ne[2], batch_size);
            embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
            embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);

            // GLU
            {
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
                embeddings = ggml_gelu_inplace(ctx0, embeddings);
                ggml_tensor * x = embeddings;
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
                x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, x);
                embeddings = ggml_swiglu_split(ctx0, embeddings, x);
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
            }

            // arrangement of BOI/EOI token embeddings
            // note: these embeddings are not present in the text model, hence we cannot process them as text tokens
            // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
            {
                embeddings = ggml_concat(ctx0, model.mm_glm_tok_boi, embeddings, 1); // BOI
                embeddings = ggml_concat(ctx0, embeddings, model.mm_glm_tok_eoi, 1); // EOI
            }
        }
        else {
            GGML_ABORT("llava: unknown projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }

    // whisper encoder with custom projector
    ggml_cgraph * build_whisper_enc() {
        const int n_frames = img.nx;
        const int n_pos = n_frames / 2;
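        // the second conv1d below uses stride 2, halving the time dimension, hence n_pos = n_frames / 2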
        GGML_ASSERT(model.position_embeddings->ne[1] >= n_pos);

        ggml_tensor * inp = build_inp_raw(1);

        // conv1d block
        {
            // convolution + gelu
            ggml_tensor * cur = ggml_conv_1d_ph(ctx0, model.conv1d_1_w, inp, 1, 1);
            cur = ggml_add(ctx0, cur, model.conv1d_1_b);
            cur = ggml_gelu_erf(ctx0, cur);

            cur = ggml_conv_1d_ph(ctx0, model.conv1d_2_w, cur, 2, 1);
            cur = ggml_add(ctx0, cur, model.conv1d_2_b);
            cur = ggml_gelu_erf(ctx0, cur);

            // transpose
            inp = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
            cb(inp, "after_conv1d", -1);
        }

        // sanity check (only checks the first layer, but the layout should be the same for all)
        GGML_ASSERT(model.layers[0].ln_1_w && model.layers[0].ln_1_b);
        GGML_ASSERT(model.layers[0].ln_2_w && model.layers[0].ln_2_b);
        GGML_ASSERT(model.layers[0].q_b);
        GGML_ASSERT(model.layers[0].v_b);
        GGML_ASSERT(!model.layers[0].k_b); // no bias for k
        GGML_ASSERT(model.post_ln_w && model.post_ln_b);

        ggml_tensor * pos_embd_selected = ggml_view_2d(
            ctx0, model.position_embeddings,
            model.position_embeddings->ne[0], n_pos,
            model.position_embeddings->nb[1], 0
        );
        ggml_tensor * cur = build_vit(
            inp, n_pos,
            NORM_TYPE_NORMAL,
            hparams.ffn_op,
            pos_embd_selected,
            nullptr);
        cb(cur, "after_transformer", -1);

        if (ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX) {
            // StackAudioFrames
            // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py
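            // pad the flattened embeddings to a multiple of (n_embd * proj_stack_factor), then view them as rows of stride elements, i.e. stack proj_stack_factor adjacent frames into one row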
            {
                int64_t stride = n_embd * hparams.proj_stack_factor;
                int64_t padded_len = GGML_PAD(ggml_nelements(cur), stride);
                int64_t pad = padded_len - ggml_nelements(cur);
                if (pad > 0) {
                    cur = ggml_view_1d(ctx0, cur, ggml_nelements(cur), 0);
                    cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
                }
                cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
                    ggml_row_size(cur->type, stride), 0);
            }
            cb(cur, "after_stacked", -1);

            // UltravoxProjector
            {
                // pre-norm
                cur = ggml_rms_norm(ctx0, cur, 1e-6);
                cur = ggml_mul(ctx0, cur, model.mm_norm_pre_w);

                // ffn in
                cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);

                // swiglu
                // see SwiGLU in ultravox_model.py: silu is applied to the second half of the input, not the first
                cur = ggml_swiglu_swapped(ctx0, cur);

                // mid-norm
                cur = ggml_rms_norm(ctx0, cur, 1e-6);
                cur = ggml_mul(ctx0, cur, model.mm_norm_mid_w);

                // ffn out
                cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            }
        } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) {
            // projector
            cur = ggml_mul_mat(ctx0, model.mm_fc_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_fc_b);
        } else {
            GGML_ABORT("%s: unknown projector type", __func__);
        }

        cb(cur, "projected", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
private:
    //
    // utility functions
    //

    void cb(ggml_tensor * cur0, const char * name, int il) const {
        if (ctx->debug_graph) {
            ggml_tensor * cur = ggml_cpy(ctx0, cur0, ggml_dup_tensor(ctx0, cur0));
            std::string cur_name = il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
            ggml_set_name(cur, cur_name.c_str());
            ggml_set_output(cur);
            ggml_build_forward_expand(gf, cur);
            ctx->debug_print_tensors.push_back(cur);
        }
    }
    // build vision transformer (ViT) cgraph
    // this function should cover most of the models
    // if your model has specific features, you should probably duplicate this function
    ggml_tensor * build_vit(
            ggml_tensor * inp,
            int64_t n_pos,
            norm_type norm_t,
            ffn_op_type ffn_t,
            ggml_tensor * learned_pos_embd,
            std::function<ggml_tensor *(ggml_tensor *, const clip_layer &)> add_pos
            ) {
        if (learned_pos_embd) {
            inp = ggml_add(ctx0, inp, learned_pos_embd);
            cb(inp, "pos_embed", -1);
        }

        ggml_tensor * inpL = inp;

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
            cb(inpL, "pre_ln", -1);
        }

        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "layer_inp_normed", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }

                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }

                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                if (layer.q_norm) {
                    Qcur = build_norm(Qcur, layer.q_norm, NULL, norm_t, eps, il);
                    cb(Qcur, "Qcur_norm", il);
                }

                if (layer.k_norm) {
                    Kcur = build_norm(Kcur, layer.k_norm, NULL, norm_t, eps, il);
                    cb(Kcur, "Kcur_norm", il);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                if (add_pos) {
                    Qcur = add_pos(Qcur, layer);
                    Kcur = add_pos(Kcur, layer);
                    cb(Qcur, "Qcur_pos", il);
                    cb(Kcur, "Kcur_pos", il);
                }

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            if (layer.ls_1_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_1_w);
                cb(cur, "attn_out_scaled", il);
            }

            // re-add the layer input, i.e. the residual
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states

            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                ffn_t, il);
            cb(cur, "ffn_out", il);

            if (layer.ls_2_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_2_w);
                cb(cur, "ffn_out_scaled", il);
            }

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // TODO @ngxson : find a way to move this outside
        if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) {
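            // average-pool the time dimension by a factor of 2; ggml_pool_1d operates on the first dim, so transpose around it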
            ggml_tensor * cur = inpL;
            cur = ggml_transpose(ctx0, cur);
            cur = ggml_cont(ctx0, cur);
            cur = ggml_pool_1d(ctx0, cur, GGML_OP_POOL_AVG, 2, 2, 0);
            cur = ggml_transpose(ctx0, cur);
            cur = ggml_cont(ctx0, cur);
            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
        }
        return inpL;
    }

    // build the input after conv2d (inp_raw --> patches)
    // returns tensor with shape [n_embd, n_patches]
    ggml_tensor * build_inp() {
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
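        // conv output ne = [n_patches_x, n_patches_y, n_embd]; flatten to [n_patches, n_embd], then transpose to [n_embd, n_patches]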
        inp = ggml_reshape_2d(ctx0, inp, n_patches, n_embd);
        inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));

        if (model.patch_bias) {
            inp = ggml_add(ctx0, inp, model.patch_bias);
            cb(inp, "patch_bias", -1);
        }
        return inp;
    }

    ggml_tensor * build_inp_raw(int channels = 3) {
        ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, channels);
        ggml_set_name(inp_raw, "inp_raw");
        ggml_set_input(inp_raw);
        return inp_raw;
    }

    ggml_tensor * build_norm(
            ggml_tensor * cur,
            ggml_tensor * mw,
            ggml_tensor * mb,
            norm_type type,
            float norm_eps,
            int il) const {
        cur = type == NORM_TYPE_RMS
            ? ggml_rms_norm(ctx0, cur, norm_eps)
            : ggml_norm(ctx0, cur, norm_eps);

        if (mw || mb) {
            cb(cur, "norm", il);
        }

        if (mw) {
            cur = ggml_mul(ctx0, cur, mw);
            if (mb) {
                cb(cur, "norm_w", il);
            }
        }

        if (mb) {
            cur = ggml_add(ctx0, cur, mb);
        }

        return cur;
    }
    ggml_tensor * build_ffn(
            ggml_tensor * cur,
            ggml_tensor * up,
            ggml_tensor * up_b,
            ggml_tensor * gate,
            ggml_tensor * gate_b,
            ggml_tensor * down,
            ggml_tensor * down_b,
            ffn_op_type type_op,
            int il) const {
        ggml_tensor * tmp = up ? ggml_mul_mat(ctx0, up, cur) : cur;
        cb(tmp, "ffn_up", il);

        if (up_b) {
            tmp = ggml_add(ctx0, tmp, up_b);
            cb(tmp, "ffn_up_b", il);
        }

        if (gate) {
            cur = ggml_mul_mat(ctx0, gate, cur);
            cb(cur, "ffn_gate", il);

            if (gate_b) {
                cur = ggml_add(ctx0, cur, gate_b);
                cb(cur, "ffn_gate_b", il);
            }
        } else {
            cur = tmp;
        }

        // we only support parallel ffn for now
        switch (type_op) {
            case FFN_SILU:
                if (gate) {
                    cur = ggml_swiglu_split(ctx0, cur, tmp);
                    cb(cur, "ffn_swiglu", il);
                } else {
                    cur = ggml_silu(ctx0, cur);
                    cb(cur, "ffn_silu", il);
                } break;
            case FFN_GELU:
                if (gate) {
                    cur = ggml_geglu_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu", il);
                } else {
                    cur = ggml_gelu(ctx0, cur);
                    cb(cur, "ffn_gelu", il);
                } break;
            case FFN_GELU_ERF:
                if (gate) {
                    cur = ggml_geglu_erf_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu_erf", il);
                } else {
                    cur = ggml_gelu_erf(ctx0, cur);
                    cb(cur, "ffn_gelu_erf", il);
                } break;
            case FFN_GELU_QUICK:
                if (gate) {
                    cur = ggml_geglu_quick_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu_quick", il);
                } else {
                    cur = ggml_gelu_quick(ctx0, cur);
                    cb(cur, "ffn_gelu_quick", il);
                } break;
        }

        if (down) {
            cur = ggml_mul_mat(ctx0, down, cur);
        }

        if (down_b) {
            cb(cur, "ffn_down", il);
            cur = ggml_add(ctx0, cur, down_b);
        }

        return cur;
    }
    ggml_tensor * build_attn(
            ggml_tensor * wo,
            ggml_tensor * wo_b,
            ggml_tensor * q_cur,
            ggml_tensor * k_cur,
            ggml_tensor * v_cur,
            ggml_tensor * kq_mask,
            float kq_scale,
            int il) const {
        // these nodes are added to the graph together so that they are not reordered
        // by doing so, the number of splits in the graph is reduced
        ggml_build_forward_expand(gf, q_cur);
        ggml_build_forward_expand(gf, k_cur);
        ggml_build_forward_expand(gf, v_cur);

        ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
        //cb(q, "q", il);

        ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
        //cb(k, "k", il);

        ggml_tensor * v = ggml_permute(ctx0, v_cur, 1, 2, 0, 3);
        v = ggml_cont(ctx0, v);
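        // after the permutes: q, k = [d_head, n_pos, n_head] and v = [n_pos, d_head, n_head], so the kq and kqv matmuls below are per-head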
        //cb(v, "v", il);

        ggml_tensor * cur;

        // TODO @ngxson : support flash attention
        {
            const auto n_tokens = q->ne[1];
            const auto n_head   = q->ne[2];
            // const auto n_kv   = k->ne[1]; // for flash attention

            ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
            // F32 may not be needed for vision encoders?
            // ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
            kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, 0.0f);

            ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
            cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
            cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
        }

        cb(cur, "kqv_out", il);

        if (wo) {
            cur = ggml_mul_mat(ctx0, wo, cur);
        }

        if (wo_b) {
            cur = ggml_add(ctx0, cur, wo_b);
        }

        return cur;
    }

    // implementation of the 2D RoPE without adding a new op in ggml
    // this is not efficient (uses double the memory), but works on all backends
    // TODO: there was a more efficient implementation which relied on ggml_view and ggml_rope_ext_inplace, but rope inplace does not work well with non-contiguous tensors; we should fix that and revert to the original implementation in https://github.com/ggml-org/llama.cpp/pull/13065
    static ggml_tensor * build_rope_2d(
        ggml_context * ctx0,
        ggml_tensor * cur,
        ggml_tensor * pos_a, // first half
        ggml_tensor * pos_b, // second half
        const float freq_base,
        const bool interleave_freq
    ) {
        const int64_t n_dim  = cur->ne[0];
        const int64_t n_head = cur->ne[1];
        const int64_t n_pos  = cur->ne[2];

        // for example, if we have cur tensor of shape (n_dim=8, n_head, n_pos)
        // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
        // first half of cur will use 1e-0, 1e-2 (even)
        // second half of cur will use 1e-1, 1e-3 (odd)
        // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
        //   ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
        // then for the second half, we use freq_scale to shift the inv_freq
        //   ^ why? replace (2i) with (2i+1) in the above equation
        const float freq_scale_odd = interleave_freq
            ? std::pow(freq_base, (float)-2/n_dim)
            : 1.0;

        // first half
        ggml_tensor * first;
        {
            first = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                0);
            first = ggml_rope_ext(
                ctx0,
                first,
                pos_a,   // positions
                nullptr, // freq factors
                n_dim/2, // n_dims
                0, 0, freq_base,
                1.0f, 0.0f, 1.0f, 0.0f, 0.0f
            );
        }

        // second half
        ggml_tensor * second;
        {
            second = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                n_dim/2 * ggml_element_size(cur));
            second = ggml_cont(ctx0, second); // copy, because ggml_rope doesn't play well with non-contiguous tensors
            second = ggml_rope_ext(
                ctx0,
                second,
                pos_b,   // positions
                nullptr, // freq factors
                n_dim/2, // n_dims
                0, 0, freq_base,
                freq_scale_odd,
                0.0f, 1.0f, 0.0f, 0.0f
            );
        }

        cur = ggml_concat(ctx0, first, second, 0);
        return cur;
    }
};

static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
    GGML_ASSERT(imgs.entries.size() == 1 && "n_batch > 1 is not supported");
    clip_graph graph(ctx, *imgs.entries[0]);

    ggml_cgraph * res;

    switch (ctx->proj_type()) {
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
            {
                res = graph.build_siglip();
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
            {
                res = graph.build_pixtral();
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            {
                res = graph.build_qwen2vl();
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                res = graph.build_minicpmv();
            } break;
        case PROJECTOR_TYPE_INTERNVL:
            {
                res = graph.build_internvl();
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                res = graph.build_llama4();
            } break;
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_QWEN2A:
            {
                res = graph.build_whisper_enc();
            } break;
        default:
            {
                res = graph.build_llava();
            } break;
    }
    return res;
}
struct clip_model_loader {
    ggml_context_ptr ctx_meta;
    gguf_context_ptr ctx_gguf;

    std::string fname;

    size_t model_size = 0; // in bytes

    bool has_vision = false;
    bool has_audio  = false;

    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_model
    clip_model_loader(const char * fname) : fname(fname) {
        struct ggml_context * meta = nullptr;

        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &meta,
        };

        ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
        if (!ctx_gguf.get()) {
            throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
        }

        ctx_meta.reset(meta);

        const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());

        // print gguf info
        {
            std::string name;
            get_string(KEY_NAME, name, false);
            std::string description;
            get_string(KEY_DESCRIPTION, description, false);
            LOG_INF("%s: model name: %s\n", __func__, name.c_str());
            LOG_INF("%s: description: %s\n", __func__, description.c_str());
            LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx_gguf.get()));
            LOG_INF("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
            LOG_INF("%s: n_tensors: %d\n", __func__, n_tensors);
            LOG_INF("%s: n_kv: %d\n", __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
            LOG_INF("\n");
        }

        // modalities
        {
            get_bool(KEY_HAS_VISION_ENC, has_vision, false);
            get_bool(KEY_HAS_AUDIO_ENC, has_audio, false);

            if (has_vision) {
                LOG_INF("%s: has vision encoder\n", __func__);
            }
            if (has_audio) {
                LOG_INF("%s: has audio encoder\n", __func__);
            }
        }

        // tensors
        {
            for (int i = 0; i < n_tensors; ++i) {
                const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
                const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
                enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
                ggml_tensor * cur = ggml_get_tensor(meta, name);
                size_t tensor_size = ggml_nbytes(cur);
                model_size += tensor_size;
                LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
                    __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }
    void load_hparams(clip_model & model, clip_modality modality) {
        auto & hparams = model.hparams;
        std::string log_ffn_op; // for logging

        // sanity check
        if (modality == CLIP_MODALITY_VISION) {
            GGML_ASSERT(has_vision);
        } else if (modality == CLIP_MODALITY_AUDIO) {
            GGML_ASSERT(has_audio);
        }
        model.modality = modality;

        // projector type
        std::string proj_type;
        {
            get_string(KEY_PROJ_TYPE, proj_type, false);
            if (!proj_type.empty()) {
                model.proj_type = clip_projector_type_from_string(proj_type);
            }
            if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) {
                throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
            }

            // correct arch for multimodal models
            if (model.proj_type == PROJECTOR_TYPE_QWEN25O) {
                model.proj_type = modality == CLIP_MODALITY_VISION
                    ? PROJECTOR_TYPE_QWEN25VL
                    : PROJECTOR_TYPE_QWEN2A;
            }
        }

        const bool is_vision = model.modality == CLIP_MODALITY_VISION;
        const bool is_audio  = model.modality == CLIP_MODALITY_AUDIO;

        // other hparams
        {
            const char * prefix = is_vision ? "vision" : "audio";
            get_u32(string_format(KEY_N_EMBD, prefix), hparams.n_embd);
            get_u32(string_format(KEY_N_HEAD, prefix), hparams.n_head);
            get_u32(string_format(KEY_N_FF, prefix), hparams.n_ff);
            get_u32(string_format(KEY_N_BLOCK, prefix), hparams.n_layer);
            get_u32(string_format(KEY_PROJ_DIM, prefix), hparams.projection_dim);
            get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps);

            if (is_vision) {
                get_u32(KEY_IMAGE_SIZE, hparams.image_size);
                get_u32(KEY_PATCH_SIZE, hparams.patch_size);
                get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
                get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy
            } else if (is_audio) {
                get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins);
            } else {
                GGML_ASSERT(false && "unknown modality");
            }

            // for pinpoints, we need to convert it into a list of resolution candidates
            {
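                // pinpoints come as a flat int list [w0, h0, w1, h1, ...]; convert them to (width, height) pairs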
                std::vector<int> pinpoints;
                get_arr_int(KEY_IMAGE_GRID_PINPOINTS, pinpoints, false);
                if (!pinpoints.empty()) {
                    for (size_t i = 0; i < pinpoints.size(); i += 2) {
                        hparams.image_res_candidates.push_back({
                            pinpoints[i],
                            pinpoints[i+1],
                        });
                    }
                }
            }

            // default warmup value
            hparams.warmup_image_size = hparams.image_size;

            hparams.has_llava_projector = model.proj_type == PROJECTOR_TYPE_MLP
                                       || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                                       || model.proj_type == PROJECTOR_TYPE_LDP
                                       || model.proj_type == PROJECTOR_TYPE_LDPV2;

            {
                bool use_gelu = false;
                bool use_silu = false;
                get_bool(KEY_USE_GELU, use_gelu, false);
                get_bool(KEY_USE_SILU, use_silu, false);
                if (use_gelu && use_silu) {
                    throw std::runtime_error(string_format("%s: both use_gelu and use_silu are set to true\n", __func__));
                }
                if (use_gelu) {
                    hparams.ffn_op = FFN_GELU;
                    log_ffn_op = "gelu";
                } else if (use_silu) {
                    hparams.ffn_op = FFN_SILU;
                    log_ffn_op = "silu";
                } else {
                    hparams.ffn_op = FFN_GELU_QUICK;
                    log_ffn_op = "gelu_quick";
                }
            }

            {
                std::string mm_patch_merge_type;
                get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
                if (mm_patch_merge_type == "spatial_unpad") {
                    hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
                }
            }

            if (is_vision) {
                int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
                int idx_std  = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
                GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
                GGML_ASSERT(idx_std >= 0  && "image_std not found");
                const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
                const float * std_data  = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
                for (int i = 0; i < 3; ++i) {
                    hparams.image_mean[i] = mean_data[i];
                    hparams.image_std[i]  = std_data[i];
                }
            }
            // Load the vision feature layer indices if they are explicitly provided;
            // if multiple vision feature layers are present, the values will be concatenated
            // to form the final visual features.
            // NOTE: gguf conversions should standardize the values of the vision feature layer to
            // be non-negative, since we use -1 to mark values as unset here.
            std::vector<int> vision_feature_layer;
            get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
            // convert std::vector to std::unordered_set
            for (auto & layer : vision_feature_layer) {
                hparams.vision_feature_layer.insert(layer);
            }

            // model-specific params
            switch (model.proj_type) {
                case PROJECTOR_TYPE_MINICPMV:
                    {
                        if (hparams.minicpmv_version == 0) {
                            hparams.minicpmv_version = 2; // default to 2 if not set
                        }
                    } break;
                case PROJECTOR_TYPE_IDEFICS3:
                case PROJECTOR_TYPE_INTERNVL:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_PIXTRAL:
                    {
                        hparams.rope_theta = 10000.0f;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        // Mistral Small 2506 needs 1024x1024 image size cap to prevent OOM
                        // ref: https://github.com/ggml-org/llama.cpp/issues/14310
                        hparams.image_size = 1024;
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.spatial_merge_size, false);
                    } break;
                case PROJECTOR_TYPE_GEMMA3:
                    {
                        // default value (used by all model sizes in gemma 3 family)
                        // number of patches for each **side** is reduced by a factor of 4
                        hparams.proj_scale_factor = 4;
                        // test model (tinygemma3) has a different value, we optionally read it
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_QWEN2VL:
                    {
                        // max image size = sqrt(max_pixels) = 3584
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct/blob/main/preprocessor_config.json
                        // however, the model uses an unreasonable amount of memory past 1024, so we cap it at 1024; otherwise it is unusable
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
                        hparams.image_size = 1024;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                    } break;
                case PROJECTOR_TYPE_QWEN25VL:
                    {
                        // max image size = sqrt(max_pixels)
                        // https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/blob/main/preprocessor_config.json
                        // however, the model uses an unreasonable amount of memory past 1024, so we cap it at 1024; otherwise it is unusable
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
                        hparams.image_size = 1024;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern);
                    } break;
                case PROJECTOR_TYPE_LLAMA4:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor);
                        set_llava_uhd_res_candidates(model, 3);
                    } break;
                case PROJECTOR_TYPE_ULTRAVOX:
                case PROJECTOR_TYPE_QWEN2A:
                    {
                        bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX;
                        get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
                        if (hparams.n_mel_bins != 128) {
                            throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
                        }
                        hparams.ffn_op = FFN_GELU_ERF;
                        log_ffn_op = "gelu_erf"; // temporary solution for logging
                    } break;
                default:
                    break;
            }
            LOG_INF("%s: projector: %s\n", __func__, proj_type.c_str());
            LOG_INF("%s: n_embd: %d\n", __func__, hparams.n_embd);
            LOG_INF("%s: n_head: %d\n", __func__, hparams.n_head);
            LOG_INF("%s: n_ff: %d\n", __func__, hparams.n_ff);
            LOG_INF("%s: n_layer: %d\n", __func__, hparams.n_layer);
            LOG_INF("%s: ffn_op: %s\n", __func__, log_ffn_op.c_str());
            LOG_INF("%s: projection_dim: %d\n", __func__, hparams.projection_dim);

            if (is_vision) {
                LOG_INF("\n--- vision hparams ---\n");
                LOG_INF("%s: image_size: %d\n", __func__, hparams.image_size);
                LOG_INF("%s: patch_size: %d\n", __func__, hparams.patch_size);
                LOG_INF("%s: has_llava_proj: %d\n", __func__, hparams.has_llava_projector);
                LOG_INF("%s: minicpmv_version: %d\n", __func__, hparams.minicpmv_version);
                LOG_INF("%s: proj_scale_factor: %d\n", __func__, hparams.proj_scale_factor);
                LOG_INF("%s: n_wa_pattern: %d\n", __func__, hparams.n_wa_pattern);
            } else if (is_audio) {
                LOG_INF("\n--- audio hparams ---\n");
                LOG_INF("%s: n_mel_bins: %d\n", __func__, hparams.n_mel_bins);
                LOG_INF("%s: proj_stack_factor: %d\n", __func__, hparams.proj_stack_factor);
            }
            LOG_INF("\n");
            LOG_INF("%s: model size: %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size: %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
        }
    }
    void load_tensors(clip_ctx & ctx_clip) {
        auto & model = ctx_clip.model;
        auto & hparams = model.hparams;
        std::map<std::string, size_t> tensor_offset;
        std::vector<ggml_tensor *> tensors_to_load;

        // TODO @ngxson : support both audio and video in the future
        const char * prefix = model.modality == CLIP_MODALITY_AUDIO ? "a" : "v";

        // get offsets
        for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
            const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
            tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
        }

        // create data context
        struct ggml_init_params params = {
            /*.mem_size =*/ (gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc =*/ true,
        };
        ctx_clip.ctx_data.reset(ggml_init(params));
        if (!ctx_clip.ctx_data) {
            throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
        }

        // helper function
        auto get_tensor = [&](const std::string & name, bool required = true) {
            ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
            if (!cur && required) {
                throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
            }
            if (cur) {
                tensors_to_load.push_back(cur);
                // add tensors to context
                ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
                ggml_set_name(data_tensor, cur->name);
                cur = data_tensor;
            }
            return cur;
        };

        model.class_embedding = get_tensor(TN_CLASS_EMBD, false);

        model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false);
        model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, prefix, "bias"), false);

        model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false);
        model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"), false);

        model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
        model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false);
        model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);

        model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false);

        // layers
        model.layers.resize(hparams.n_layer);
        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = model.layers[il];
            layer.k_w = get_tensor(string_format(TN_ATTN_K, prefix, il, "weight"));
            layer.q_w = get_tensor(string_format(TN_ATTN_Q, prefix, il, "weight"));
            layer.v_w = get_tensor(string_format(TN_ATTN_V, prefix, il, "weight"));
            layer.o_w = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "weight"));
            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, prefix, il, "weight"), false);
            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, prefix, il, "weight"), false);
            layer.ln_1_w = get_tensor(string_format(TN_LN_1, prefix, il, "weight"), false);
            layer.ln_2_w = get_tensor(string_format(TN_LN_2, prefix, il, "weight"), false);
            layer.ls_1_w = get_tensor(string_format(TN_LS_1, prefix, il, "weight"), false); // no bias
            layer.ls_2_w = get_tensor(string_format(TN_LS_2, prefix, il, "weight"), false); // no bias
            layer.k_b = get_tensor(string_format(TN_ATTN_K, prefix, il, "bias"), false);
            layer.q_b = get_tensor(string_format(TN_ATTN_Q, prefix, il, "bias"), false);
            layer.v_b = get_tensor(string_format(TN_ATTN_V, prefix, il, "bias"), false);
            layer.o_b = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "bias"), false);
            layer.ln_1_b = get_tensor(string_format(TN_LN_1, prefix, il, "bias"), false);
            layer.ln_2_b = get_tensor(string_format(TN_LN_2, prefix, il, "bias"), false);

            // ffn
            layer.ff_up_w = get_tensor(string_format(TN_FFN_UP, prefix, il, "weight"));
            layer.ff_up_b = get_tensor(string_format(TN_FFN_UP, prefix, il, "bias"), false);
            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, prefix, il, "weight"), false);
            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, prefix, il, "bias"), false);
            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "weight"));
            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "bias"), false);

            // some models were exported with legacy (incorrect) naming, which is quite messy; fix the swapped tensors here
            // note: a Qwen model converted with the old surgery script has n_ff = 0, so we cannot use n_ff to check!
            if (layer.ff_up_w && layer.ff_down_w && layer.ff_down_w->ne[0] == hparams.n_embd) {
                // swap up and down weights
                ggml_tensor * tmp = layer.ff_up_w;
                layer.ff_up_w = layer.ff_down_w;
                layer.ff_down_w = tmp;
                // swap up and down biases
                tmp = layer.ff_up_b;
                layer.ff_up_b = layer.ff_down_b;
                layer.ff_down_b = tmp;
            }
        }
        switch (model.proj_type) {
            case PROJECTOR_TYPE_MLP:
            case PROJECTOR_TYPE_MLP_NORM:
                {
                    // LLaVA projection
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
                    // Yi-type llava
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    // missing in Yi-type llava
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // Yi-type llava
                    model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
                    model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
                    model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
                    model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
                    if (model.mm_3_w) {
                        // TODO: this is a hack to support Yi-type llava
                        model.proj_type = PROJECTOR_TYPE_MLP_NORM;
                    }
                    model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
                } break;
            case PROJECTOR_TYPE_LDP:
                {
                    // MobileVLM projection
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                    model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
                    model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
                    model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
                    model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
                    model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
                    model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
                    model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
                    model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
                    model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
                    model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
                    model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
                    model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
                    model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
                    model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
                    model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
                    model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
                    model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
                    model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
                    model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
                    model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
                } break;
            case PROJECTOR_TYPE_LDPV2:
                {
                    // MobileVLM_V2 projection
                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                    model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
                    model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
                    model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
                } break;
            case PROJECTOR_TYPE_MINICPMV:
                {
                    // model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
                    model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
                    model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
                    model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
                    model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
                    model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
                    model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
                    model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
                    model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
                    model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
                    model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
                    model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
                    model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
                    model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
                    model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
                    model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
                    model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
                    model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
                    model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
                } break;
            case PROJECTOR_TYPE_GLM_EDGE:
                {
                    model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                    model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
                    model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
                    model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
                    model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
                    model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2VL:
            case PROJECTOR_TYPE_QWEN25VL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_GEMMA3:
                {
                    model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
                    model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
                } break;
            case PROJECTOR_TYPE_IDEFICS3:
                {
                    model.projection = get_tensor(TN_MM_PROJECTOR);
                } break;
            case PROJECTOR_TYPE_PIXTRAL:
                {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // [IMG_BREAK] token embedding
                    model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
                    // for mistral small 3.1
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false);
                    model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
                } break;
            case PROJECTOR_TYPE_ULTRAVOX:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
                    model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2A:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight"));
                    model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias"));
                } break;
            case PROJECTOR_TYPE_INTERNVL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                } break;
            case PROJECTOR_TYPE_LLAMA4:
                {
                    model.mm_model_proj = get_tensor(TN_MM_PROJECTOR);
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                } break;
            default:
                GGML_ASSERT(false && "unknown projector type");
        }
  2178. // load data
  2179. {
  2180. std::vector<uint8_t> read_buf;
  2181. auto fin = std::ifstream(fname, std::ios::binary);
  2182. if (!fin) {
  2183. throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
  2184. }
  2185. // alloc memory and offload data
  2186. ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
  2187. ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
  2188. ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
  2189. for (auto & t : tensors_to_load) {
  2190. ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
  2191. const size_t offset = tensor_offset[t->name];
  2192. fin.seekg(offset, std::ios::beg);
  2193. if (!fin) {
  2194. throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
  2195. }
  2196. size_t num_bytes = ggml_nbytes(cur);
  2197. if (ggml_backend_buft_is_host(buft)) {
  2198. // for the CPU and Metal backend, we can read directly into the tensor
  2199. fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
  2200. } else {
  2201. // read into a temporary buffer first, then copy to device memory
  2202. read_buf.resize(num_bytes);
  2203. fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
  2204. ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
  2205. }
  2206. }
  2207. fin.close();
  2208. LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
  2209. }
  2210. }
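// reserve the compute buffers by building a warm-up graph for a dummy batch:
// a warmup_image_size x warmup_image_size image for vision, or a
// warmup_audio_size x n_mel_bins mel spectrogram for audio; the per-backend
// compute buffer sizes are then logged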
  2211. void alloc_compute_meta(clip_ctx & ctx_clip) {
  2212. const auto & hparams = ctx_clip.model.hparams;
  2213. ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());
  2214. // create a fake batch
  2215. clip_image_f32_batch batch;
  2216. clip_image_f32_ptr img(clip_image_f32_init());
  2217. if (ctx_clip.model.modality == CLIP_MODALITY_VISION) {
  2218. img->nx = hparams.warmup_image_size;
  2219. img->ny = hparams.warmup_image_size;
  2220. } else {
  2221. img->nx = hparams.warmup_audio_size;
  2222. img->ny = hparams.n_mel_bins;
  2223. }
  2224. batch.entries.push_back(std::move(img));
  2225. ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
  2226. ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);
  2227. for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
  2228. ggml_backend_t backend = ctx_clip.backend_ptrs[i];
  2229. ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
  2230. size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
  2231. if (size > 1) {
  2232. LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
  2233. ggml_backend_buft_name(buft),
  2234. size / 1024.0 / 1024.0);
  2235. }
  2236. }
  2237. }
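// helpers for reading metadata key/value pairs from the GGUF header;
// if the key is missing: throw when required, otherwise leave `output` untouched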
  2238. void get_bool(const std::string & key, bool & output, bool required = true) {
  2239. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  2240. if (i < 0) {
  2241. if (required) throw std::runtime_error("Key not found: " + key);
  2242. return;
  2243. }
  2244. output = gguf_get_val_bool(ctx_gguf.get(), i);
  2245. }
  2246. void get_i32(const std::string & key, int & output, bool required = true) {
  2247. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  2248. if (i < 0) {
  2249. if (required) throw std::runtime_error("Key not found: " + key);
  2250. return;
  2251. }
  2252. output = gguf_get_val_i32(ctx_gguf.get(), i);
  2253. }
  2254. void get_u32(const std::string & key, int & output, bool required = true) {
  2255. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  2256. if (i < 0) {
  2257. if (required) throw std::runtime_error("Key not found: " + key);
  2258. return;
  2259. }
  2260. output = gguf_get_val_u32(ctx_gguf.get(), i);
  2261. }
  2262. void get_f32(const std::string & key, float & output, bool required = true) {
  2263. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  2264. if (i < 0) {
  2265. if (required) throw std::runtime_error("Key not found: " + key);
  2266. return;
  2267. }
  2268. output = gguf_get_val_f32(ctx_gguf.get(), i);
  2269. }
  2270. void get_string(const std::string & key, std::string & output, bool required = true) {
  2271. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  2272. if (i < 0) {
  2273. if (required) throw std::runtime_error("Key not found: " + key);
  2274. return;
  2275. }
  2276. output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
  2277. }
  2278. void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) {
  2279. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  2280. if (i < 0) {
  2281. if (required) throw std::runtime_error("Key not found: " + key);
  2282. return;
  2283. }
  2284. int n = gguf_get_arr_n(ctx_gguf.get(), i);
  2285. output.resize(n);
  2286. const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
  2287. for (int i = 0; i < n; ++i) {
  2288. output[i] = values[i];
  2289. }
  2290. }
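// enumerate the LLaVA-UHD resolution candidates: every grid of x*y slices
// (1..max_patches_per_side per side, skipping 1x1), each candidate being
// (x*image_size, y*image_size); these are consumed later by select_best_resolution()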
  2291. void set_llava_uhd_res_candidates(clip_model & model, const int max_patches_per_side) {
  2292. auto & hparams = model.hparams;
  2293. for (int x = 1; x <= max_patches_per_side; x++) {
  2294. for (int y = 1; y <= max_patches_per_side; y++) {
  2295. if (x == 1 && y == 1) {
  2296. continue; // skip the first point
  2297. }
  2298. hparams.image_res_candidates.push_back(clip_image_size{
  2299. x*hparams.image_size,
  2300. y*hparams.image_size,
  2301. });
  2302. }
  2303. }
  2304. }
  2305. };
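// entry point: parse the GGUF metadata once (clip_model_loader), then build a
// separate clip_ctx per modality present (vision and/or audio); on any failure
// both contexts are freed and {nullptr, nullptr} is returned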
  2306. struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) {
  2307. g_logger_state.verbosity_thold = ctx_params.verbosity;
  2308. clip_ctx * ctx_vision = nullptr;
  2309. clip_ctx * ctx_audio = nullptr;
  2310. try {
  2311. clip_model_loader loader(fname);
  2312. if (loader.has_vision) {
  2313. ctx_vision = new clip_ctx(ctx_params);
  2314. loader.load_hparams(ctx_vision->model, CLIP_MODALITY_VISION);
  2315. loader.load_tensors(*ctx_vision);
  2316. loader.alloc_compute_meta(*ctx_vision);
  2317. }
  2318. if (loader.has_audio) {
  2319. ctx_audio = new clip_ctx(ctx_params);
  2320. loader.load_hparams(ctx_audio->model, CLIP_MODALITY_AUDIO);
  2321. loader.load_tensors(*ctx_audio);
  2322. loader.alloc_compute_meta(*ctx_audio);
  2323. }
  2324. } catch (const std::exception & e) {
  2325. LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
  2326. if (ctx_vision) {
  2327. delete ctx_vision;
  2328. }
  2329. if (ctx_audio) {
  2330. delete ctx_audio;
  2331. }
  2332. return {nullptr, nullptr};
  2333. }
  2334. return {ctx_vision, ctx_audio};
  2335. }
  2336. struct clip_image_size * clip_image_size_init() {
  2337. struct clip_image_size * load_image_size = new struct clip_image_size();
  2338. load_image_size->width = 448;
  2339. load_image_size->height = 448;
  2340. return load_image_size;
  2341. }
  2342. struct clip_image_u8 * clip_image_u8_init() {
  2343. return new clip_image_u8();
  2344. }
  2345. struct clip_image_f32 * clip_image_f32_init() {
  2346. return new clip_image_f32();
  2347. }
  2348. struct clip_image_f32_batch * clip_image_f32_batch_init() {
  2349. return new clip_image_f32_batch();
  2350. }
  2351. unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
  2352. if (nx) *nx = img->nx;
  2353. if (ny) *ny = img->ny;
  2354. return img->buf.data();
  2355. }
  2356. void clip_image_size_free(struct clip_image_size * load_image_size) {
  2357. if (load_image_size == nullptr) {
  2358. return;
  2359. }
  2360. delete load_image_size;
  2361. }
  2362. void clip_image_u8_free(struct clip_image_u8 * img) { if (img) delete img; }
  2363. void clip_image_f32_free(struct clip_image_f32 * img) { if (img) delete img; }
  2364. void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { if (batch) delete batch; }
  2365. void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { if (batch) delete batch; }
  2366. size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
  2367. return batch->entries.size();
  2368. }
  2369. size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
  2370. if (idx < 0 || idx >= (int)batch->entries.size()) {
  2371. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  2372. return 0;
  2373. }
  2374. return batch->entries[idx]->nx;
  2375. }
  2376. size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
  2377. if (idx < 0 || idx >= (int)batch->entries.size()) {
  2378. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  2379. return 0;
  2380. }
  2381. return batch->entries[idx]->ny;
  2382. }
  2383. clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
  2384. if (idx < 0 || idx >= (int)batch->entries.size()) {
  2385. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  2386. return nullptr;
  2387. }
  2388. return batch->entries[idx].get();
  2389. }
  2390. void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
  2391. img->nx = nx;
  2392. img->ny = ny;
  2393. img->buf.resize(3 * nx * ny);
  2394. memcpy(img->buf.data(), rgb_pixels, img->buf.size());
  2395. }
// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
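// i.e. dst[i] = (src[i] / 255 - mean[c]) / std[c] with c = i % 3 (interleaved RGB)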
  2397. static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
  2398. dst.nx = src.nx;
  2399. dst.ny = src.ny;
  2400. dst.buf.resize(src.buf.size());
  2401. // TODO @ngxson : seems like this could be done more efficiently on cgraph
  2402. for (size_t i = 0; i < src.buf.size(); ++i) {
  2403. int c = i % 3; // rgb
  2404. dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
  2405. }
  2406. }
// set of tools to manipulate images
  2408. // in the future, we can have HW acceleration by allowing this struct to access 3rd party lib like imagick or opencv
  2409. struct image_manipulation {
  2410. // Bilinear resize function
  2411. static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int target_width, int target_height) {
  2412. dst.nx = target_width;
  2413. dst.ny = target_height;
  2414. dst.buf.resize(3 * target_width * target_height);
  2415. float x_ratio = static_cast<float>(src.nx - 1) / target_width;
  2416. float y_ratio = static_cast<float>(src.ny - 1) / target_height;
  2417. for (int y = 0; y < target_height; y++) {
  2418. for (int x = 0; x < target_width; x++) {
  2419. float px = x_ratio * x;
  2420. float py = y_ratio * y;
  2421. int x_floor = static_cast<int>(px);
  2422. int y_floor = static_cast<int>(py);
  2423. float x_lerp = px - x_floor;
  2424. float y_lerp = py - y_floor;
  2425. for (int c = 0; c < 3; c++) {
  2426. float top = lerp(
  2427. static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
  2428. static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
  2429. x_lerp
  2430. );
  2431. float bottom = lerp(
  2432. static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
  2433. static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
  2434. x_lerp
  2435. );
  2436. dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
  2437. }
  2438. }
  2439. }
  2440. }
  2441. // Bicubic resize function
// part of the image will be cropped if the aspect ratio is different
  2443. static bool bicubic_resize(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
  2444. const int nx = img.nx;
  2445. const int ny = img.ny;
  2446. dst.nx = target_width;
  2447. dst.ny = target_height;
  2448. dst.buf.resize(3 * target_width * target_height);
  2449. float Cc;
  2450. float C[5];
  2451. float d0, d2, d3, a0, a1, a2, a3;
  2452. int i, j, k, jj;
  2453. int x, y;
  2454. float dx, dy;
  2455. float tx, ty;
  2456. tx = (float)nx / (float)target_width;
  2457. ty = (float)ny / (float)target_height;
// Bicubic interpolation; adapted from ViT.cpp, inspired by:
  2459. // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
  2460. // -> https://en.wikipedia.org/wiki/Bicubic_interpolation
  2461. for (i = 0; i < target_height; i++) {
  2462. for (j = 0; j < target_width; j++) {
  2463. x = (int)(tx * j);
  2464. y = (int)(ty * i);
  2465. dx = tx * j - x;
  2466. dy = ty * i - y;
  2467. for (k = 0; k < 3; k++) {
  2468. for (jj = 0; jj <= 3; jj++) {
  2469. d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  2470. d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  2471. d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  2472. a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  2473. a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
  2474. a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
  2475. a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
  2476. C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
  2477. d0 = C[0] - C[1];
  2478. d2 = C[2] - C[1];
  2479. d3 = C[3] - C[1];
  2480. a0 = C[1];
  2481. a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
  2482. a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
  2483. a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
  2484. Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;
  2485. const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
  2486. dst.buf[(i * target_width + j) * 3 + k] = float(Cc2);
  2487. }
  2488. }
  2489. }
  2490. }
  2491. return true;
  2492. }
  2493. // llava-1.6 type of resize_and_pad
  2494. // if the ratio is not 1:1, padding with pad_color will be applied
// pad_color is the RGB fill color, default {0, 0, 0} (black)
  2496. static void resize_and_pad_image(const clip_image_u8 & image, clip_image_u8 & dst, const clip_image_size & target_resolution, std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
  2497. int target_width = target_resolution.width;
  2498. int target_height = target_resolution.height;
  2499. float scale_w = static_cast<float>(target_width) / image.nx;
  2500. float scale_h = static_cast<float>(target_height) / image.ny;
  2501. int new_width, new_height;
  2502. if (scale_w < scale_h) {
  2503. new_width = target_width;
  2504. new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
  2505. } else {
  2506. new_height = target_height;
  2507. new_width = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
  2508. }
  2509. clip_image_u8 resized_image;
  2510. bicubic_resize(image, resized_image, new_width, new_height);
  2511. clip_image_u8 padded_image;
  2512. padded_image.nx = target_width;
  2513. padded_image.ny = target_height;
  2514. padded_image.buf.resize(3 * target_width * target_height);
  2515. // Fill the padded image with the fill color
  2516. for (size_t i = 0; i < padded_image.buf.size(); i += 3) {
  2517. padded_image.buf[i] = pad_color[0];
  2518. padded_image.buf[i + 1] = pad_color[1];
  2519. padded_image.buf[i + 2] = pad_color[2];
  2520. }
  2521. // Calculate padding offsets
  2522. int pad_x = (target_width - new_width) / 2;
  2523. int pad_y = (target_height - new_height) / 2;
  2524. // Copy the resized image into the center of the padded buffer
  2525. for (int y = 0; y < new_height; ++y) {
  2526. for (int x = 0; x < new_width; ++x) {
  2527. for (int c = 0; c < 3; ++c) {
  2528. padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
  2529. }
  2530. }
  2531. }
  2532. dst = std::move(padded_image);
  2533. }
  2534. static void crop_image(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
  2535. dst.nx = w;
  2536. dst.ny = h;
  2537. dst.buf.resize(3 * w * h);
  2538. for (int i = 0; i < h; ++i) {
  2539. for (int j = 0; j < w; ++j) {
  2540. int src_idx = 3 * ((y + i)*image.nx + (x + j));
  2541. int dst_idx = 3 * (i*w + j);
  2542. dst.buf[dst_idx] = image.buf[src_idx];
  2543. dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
  2544. dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
  2545. }
  2546. }
  2547. }
  2548. // calculate the size of the **resized** image, while preserving the aspect ratio
  2549. // the calculated size will be aligned to the nearest multiple of align_size
  2550. // if H or W size is larger than max_dimension, it will be resized to max_dimension
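//
// illustrative example (not from the original source): inp_size = 1000x747,
// align_size = 28, max_dimension = 1024 -> scale = 1.0, unaligned target = 1000x747,
// aligned result = 1008x756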
  2551. static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int max_dimension) {
  2552. if (inp_size.width <= 0 || inp_size.height <= 0 || align_size <= 0 || max_dimension <= 0) {
  2553. return {0, 0};
  2554. }
  2555. float scale = std::min(1.0f, std::min(static_cast<float>(max_dimension) / inp_size.width,
  2556. static_cast<float>(max_dimension) / inp_size.height));
  2557. float target_width_f = static_cast<float>(inp_size.width) * scale;
  2558. float target_height_f = static_cast<float>(inp_size.height) * scale;
  2559. int aligned_width = CLIP_ALIGN((int)target_width_f, align_size);
  2560. int aligned_height = CLIP_ALIGN((int)target_height_f, align_size);
  2561. return {aligned_width, aligned_height};
  2562. }
  2563. private:
  2564. static inline int clip(int x, int lower, int upper) {
  2565. return std::max(lower, std::min(x, upper));
  2566. }
  2567. // Linear interpolation between two points
  2568. static inline float lerp(float s, float e, float t) {
  2569. return s + (e - s) * t;
  2570. }
  2571. };
  2572. /**
  2573. * implementation of LLaVA-UHD:
  2574. * - https://arxiv.org/pdf/2403.11703
  2575. * - https://github.com/thunlp/LLaVA-UHD
  2576. * - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
  2577. *
  2578. * overview:
* - an image always has a single overview (downscaled image)
  2580. * - an image can have 0 or multiple slices, depending on the image size
  2581. * - each slice can then be considered as a separate image
  2582. *
  2583. * for example:
  2584. *
* [overview] --> [slice 1] --> [slice 2]
*                    |             |
*                    +--> [slice 3] --> [slice 4]
  2588. */
  2589. struct llava_uhd {
  2590. struct slice_coordinates {
  2591. int x;
  2592. int y;
  2593. clip_image_size size;
  2594. };
  2595. struct slice_instructions {
  2596. clip_image_size overview_size; // size of downscaled image
  2597. clip_image_size refined_size; // size of image right before slicing (must be multiple of slice size)
  2598. clip_image_size grid_size; // grid_size.width * grid_size.height = number of slices
  2599. std::vector<slice_coordinates> slices;
bool padding_refined = false; // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
  2601. };
  2602. static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
  2603. slice_instructions res;
  2604. const int patch_size = clip_get_patch_size(ctx);
  2605. const int slice_size = clip_get_image_size(ctx);
  2606. const int original_width = original_size.width;
  2607. const int original_height = original_size.height;
  2608. const bool has_slices = original_size.width > slice_size || original_size.height > slice_size;
  2609. const bool has_pinpoints = !ctx->model.hparams.image_res_candidates.empty();
  2610. if (!has_slices) {
  2611. // skip slicing logic
  2612. res.overview_size = clip_image_size{slice_size, slice_size};
  2613. res.refined_size = clip_image_size{0, 0};
  2614. res.grid_size = clip_image_size{0, 0};
  2615. return res;
  2616. }
  2617. if (has_pinpoints) {
  2618. // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
  2619. auto refine_size = llava_uhd::select_best_resolution(
  2620. original_size,
  2621. ctx->model.hparams.image_res_candidates);
  2622. res.overview_size = clip_image_size{slice_size, slice_size};
  2623. res.refined_size = refine_size;
  2624. res.grid_size = clip_image_size{0, 0};
  2625. res.padding_refined = true;
  2626. LOG_DBG("%s: using pinpoints for slicing\n", __func__);
  2627. LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n",
  2628. __func__, original_width, original_height,
  2629. res.overview_size.width, res.overview_size.height,
  2630. res.refined_size.width, res.refined_size.height);
  2631. for (int y = 0; y < refine_size.height; y += slice_size) {
  2632. for (int x = 0; x < refine_size.width; x += slice_size) {
  2633. slice_coordinates slice;
  2634. slice.x = x;
  2635. slice.y = y;
  2636. slice.size.width = std::min(slice_size, refine_size.width - x);
  2637. slice.size.height = std::min(slice_size, refine_size.height - y);
  2638. res.slices.push_back(slice);
  2639. LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
  2640. __func__, (int)res.slices.size() - 1,
  2641. slice.x, slice.y, slice.size.width, slice.size.height);
  2642. }
  2643. }
  2644. res.grid_size.height = refine_size.height / slice_size;
  2645. res.grid_size.width = refine_size.width / slice_size;
  2646. LOG_DBG("%s: grid size: %d x %d\n", __func__, res.grid_size.width, res.grid_size.height);
  2647. return res;
  2648. }
  2649. // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)
  2650. auto best_size = get_best_resize(original_size, slice_size, patch_size, !has_slices);
  2651. res.overview_size = best_size;
  2652. {
  2653. const int max_slice_nums = 9; // TODO: this is only used by minicpmv, maybe remove it
  2654. const float log_ratio = log((float)original_width / original_height);
  2655. const float ratio = (float)original_width * original_height / (slice_size * slice_size);
  2656. const int multiple = fmin(ceil(ratio), max_slice_nums);
  2657. auto best_grid = get_best_grid(max_slice_nums, multiple, log_ratio);
  2658. auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
  2659. res.grid_size = best_grid;
  2660. res.refined_size = refine_size;
  2661. LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d, grid size: %d x %d\n",
  2662. __func__, original_width, original_height,
  2663. res.overview_size.width, res.overview_size.height,
  2664. res.refined_size.width, res.refined_size.height,
  2665. res.grid_size.width, res.grid_size.height);
  2666. int width = refine_size.width;
  2667. int height = refine_size.height;
  2668. int grid_x = int(width / best_grid.width);
  2669. int grid_y = int(height / best_grid.height);
  2670. for (int patches_y = 0, ic = 0;
  2671. patches_y < refine_size.height && ic < best_grid.height;
  2672. patches_y += grid_y, ic += 1) {
  2673. for (int patches_x = 0, jc = 0;
  2674. patches_x < refine_size.width && jc < best_grid.width;
  2675. patches_x += grid_x, jc += 1) {
  2676. slice_coordinates slice;
  2677. slice.x = patches_x;
  2678. slice.y = patches_y;
  2679. slice.size.width = grid_x;
  2680. slice.size.height = grid_y;
  2681. res.slices.push_back(slice);
  2682. LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
  2683. __func__, (int)res.slices.size() - 1,
  2684. slice.x, slice.y, slice.size.width, slice.size.height);
  2685. }
  2686. }
  2687. }
  2688. return res;
  2689. }
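// apply slice_instructions to an image: output[0] is always the overview
// (bicubic resize to overview_size); if slices were requested, the image is
// first brought to refined_size (padded when padding_refined is set) and each
// slice is cropped out in row-major order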
  2690. static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
  2691. std::vector<clip_image_u8_ptr> output;
  2692. // resize to overview size
  2693. clip_image_u8_ptr resized_img(clip_image_u8_init());
  2694. image_manipulation::bicubic_resize(*img, *resized_img, inst.overview_size.width, inst.overview_size.height);
  2695. output.push_back(std::move(resized_img));
  2696. if (inst.slices.empty()) {
  2697. // no slices, just return the resized image
  2698. return output;
  2699. }
  2700. // resize to refined size
  2701. clip_image_u8_ptr refined_img(clip_image_u8_init());
  2702. if (inst.padding_refined) {
  2703. image_manipulation::resize_and_pad_image(*img, *refined_img, inst.refined_size);
  2704. } else {
  2705. image_manipulation::bilinear_resize(*img, *refined_img, inst.refined_size.width, inst.refined_size.height);
  2706. }
  2707. // create slices
  2708. for (const auto & slice : inst.slices) {
  2709. int x = slice.x;
  2710. int y = slice.y;
  2711. int w = slice.size.width;
  2712. int h = slice.size.height;
  2713. clip_image_u8_ptr img_slice(clip_image_u8_init());
  2714. image_manipulation::crop_image(*refined_img, *img_slice, x, y, w, h);
  2715. output.push_back(std::move(img_slice));
  2716. }
  2717. return output;
  2718. }
  2719. private:
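// downscale so that width*height is roughly scale_resolution^2 while keeping
// the aspect ratio (or always rescale when allow_upscale is set), then round
// both sides to multiples of patch_size via ensure_divide()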
  2720. static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
  2721. int width = original_size.width;
  2722. int height = original_size.height;
  2723. if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
  2724. float r = static_cast<float>(width) / height;
  2725. height = static_cast<int>(scale_resolution / std::sqrt(r));
  2726. width = static_cast<int>(height * r);
  2727. }
  2728. clip_image_size res;
  2729. res.width = ensure_divide(width, patch_size);
  2730. res.height = ensure_divide(height, patch_size);
  2731. return res;
  2732. }
  2733. static clip_image_size resize_maintain_aspect_ratio(const clip_image_size & orig, const clip_image_size & target_max) {
  2734. float scale_width = static_cast<float>(target_max.width) / orig.width;
  2735. float scale_height = static_cast<float>(target_max.height) / orig.height;
  2736. float scale = std::min(scale_width, scale_height);
  2737. return clip_image_size{
  2738. static_cast<int>(orig.width * scale),
  2739. static_cast<int>(orig.height * scale),
  2740. };
  2741. }
  2742. /**
  2743. * Selects the best resolution from a list of possible resolutions based on the original size.
  2744. *
  2745. * For example, when given a list of resolutions:
  2746. * - 100x100
  2747. * - 200x100
  2748. * - 100x200
  2749. * - 200x200
  2750. *
* and an input image of size 111x200, the best fit is 100x200 (least wasted resolution).
  2752. *
  2753. * @param original_size The original size of the image
  2754. * @param possible_resolutions A list of possible resolutions
  2755. * @return The best fit resolution
  2756. */
  2757. static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
  2758. clip_image_size best_fit;
  2759. int min_wasted_area = std::numeric_limits<int>::max();
  2760. int max_effective_resolution = 0;
  2761. for (const clip_image_size & candidate : possible_resolutions) {
  2762. auto target_size = resize_maintain_aspect_ratio(original_size, candidate);
  2763. int effective_resolution = std::min(
  2764. target_size.width * target_size.height,
  2765. original_size.width * original_size.height);
  2766. int wasted_area = (candidate.width * candidate.height) - effective_resolution;
  2767. if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_area < min_wasted_area)) {
  2768. max_effective_resolution = effective_resolution;
  2769. min_wasted_area = wasted_area;
  2770. best_fit = candidate;
  2771. }
  2772. LOG_DBG("%s: candidate: %d x %d, target: %d x %d, wasted: %d, effective: %d\n", __func__, candidate.width, candidate.height, target_size.width, target_size.height, wasted_area, effective_resolution);
  2773. }
  2774. return best_fit;
  2775. }
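// round `length` to the nearest multiple of patch_size, but never below patch_size itself;
// e.g. ensure_divide(500, 14) = 504 and ensure_divide(5, 14) = 14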
  2776. static int ensure_divide(int length, int patch_size) {
  2777. return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
  2778. }
  2779. static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
  2780. int width = original_size.width;
  2781. int height = original_size.height;
  2782. int grid_x = grid.width;
  2783. int grid_y = grid.height;
  2784. int refine_width = ensure_divide(width, grid_x);
  2785. int refine_height = ensure_divide(height, grid_y);
  2786. clip_image_size grid_size;
  2787. grid_size.width = refine_width / grid_x;
  2788. grid_size.height = refine_height / grid_y;
  2789. auto best_grid_size = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
  2790. int best_grid_width = best_grid_size.width;
  2791. int best_grid_height = best_grid_size.height;
  2792. clip_image_size refine_size;
  2793. refine_size.width = best_grid_width * grid_x;
  2794. refine_size.height = best_grid_height * grid_y;
  2795. return refine_size;
  2796. }
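// pick the slice grid whose aspect ratio (in log space) is closest to the image's:
// candidate slice counts are {multiple-1, multiple, multiple+1} (excluding 1 and
// anything above max_slice_nums), expanded into all factor pairs (m, n/m).
//
// illustrative example (not from the original source): an 800x1200 image with
// slice_size 448 gives ratio ~= 4.8 -> multiple = 5, candidates {4, 5, 6}; among
// their factor pairs the grid {2, 3} minimizes |log(800/1200) - log(2/3)|,
// so best_grid = 2x3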
  2797. static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
  2798. std::vector<int> candidate_split_grids_nums;
  2799. for (int i : {multiple - 1, multiple, multiple + 1}) {
  2800. if (i == 1 || i > max_slice_nums) {
  2801. continue;
  2802. }
  2803. candidate_split_grids_nums.push_back(i);
  2804. }
  2805. std::vector<clip_image_size> candidate_grids;
  2806. for (int split_grids_nums : candidate_split_grids_nums) {
  2807. int m = 1;
  2808. while (m <= split_grids_nums) {
  2809. if (split_grids_nums % m == 0) {
  2810. candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
  2811. }
  2812. ++m;
  2813. }
  2814. }
  2815. clip_image_size best_grid{1, 1};
  2816. float min_error = std::numeric_limits<float>::infinity();
  2817. for (const auto& grid : candidate_grids) {
  2818. float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
  2819. if (error < min_error) {
  2820. best_grid = grid;
  2821. min_error = error;
  2822. }
  2823. }
  2824. return best_grid;
  2825. }
  2826. };
// returns the normalized float tensor for llava-1.5; for spatial_unpad with anyres processing (llava-1.6) it returns the normalized image patch tensors as a vector
// res_imgs memory is allocated here; previous allocations will be freed if found
  2829. bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
  2830. clip_image_size original_size{img->nx, img->ny};
  2831. bool pad_to_square = true;
  2832. auto & params = ctx->model.hparams;
// The model config contains all we need to decide how to preprocess; here we automatically switch to the llava-1.6 (spatial_unpad) preprocessing when applicable
  2834. if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) {
  2835. pad_to_square = false;
  2836. }
  2837. if (clip_is_minicpmv(ctx)) {
  2838. auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
  2839. std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
  2840. for (size_t i = 0; i < imgs.size(); ++i) {
  2841. // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
  2842. clip_image_f32_ptr res(clip_image_f32_init());
  2843. normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
  2844. res_imgs->entries.push_back(std::move(res));
  2845. }
  2846. res_imgs->grid_x = inst.grid_size.width;
  2847. res_imgs->grid_y = inst.grid_size.height;
  2848. return true;
  2849. } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
  2850. clip_image_u8 resized;
  2851. auto patch_size = params.patch_size * 2;
  2852. auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, patch_size, params.image_size);
  2853. image_manipulation::bicubic_resize(*img, resized, new_size.width, new_size.height);
  2854. clip_image_f32_ptr img_f32(clip_image_f32_init());
  2855. // clip_image_f32_ptr res(clip_image_f32_init());
  2856. normalize_image_u8_to_f32(resized, *img_f32, params.image_mean, params.image_std);
  2857. // res_imgs->data[0] = *res;
  2858. res_imgs->entries.push_back(std::move(img_f32));
  2859. return true;
  2860. }
  2861. else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE
  2862. || ctx->proj_type() == PROJECTOR_TYPE_GEMMA3
  2863. || ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3
  2864. || ctx->proj_type() == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution
  2865. ) {
  2866. clip_image_u8 resized_image;
  2867. int sz = params.image_size;
  2868. image_manipulation::resize_and_pad_image(*img, resized_image, {sz, sz});
  2869. clip_image_f32_ptr img_f32(clip_image_f32_init());
  2870. //clip_image_save_to_bmp(resized_image, "resized.bmp");
  2871. normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
  2872. res_imgs->entries.push_back(std::move(img_f32));
  2873. return true;
  2874. } else if (ctx->proj_type() == PROJECTOR_TYPE_PIXTRAL) {
  2875. clip_image_u8 resized_image;
  2876. auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, params.patch_size, params.image_size);
  2877. image_manipulation::bilinear_resize(*img, resized_image, new_size.width, new_size.height);
  2878. clip_image_f32_ptr img_f32(clip_image_f32_init());
  2879. normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
  2880. res_imgs->entries.push_back(std::move(img_f32));
  2881. return true;
  2882. } else if (ctx->proj_type() == PROJECTOR_TYPE_LLAMA4) {
  2883. GGML_ASSERT(!params.image_res_candidates.empty());
  2884. auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
  2885. std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
  2886. for (size_t i = 0; i < imgs.size(); ++i) {
  2887. clip_image_f32_ptr res(clip_image_f32_init());
  2888. normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
  2889. res_imgs->entries.push_back(std::move(res));
  2890. }
  2891. res_imgs->grid_x = inst.grid_size.width;
  2892. res_imgs->grid_y = inst.grid_size.height;
  2893. return true;
  2894. }
// the logic below pads the shorter side to match the longer side with a background color: rgb(122, 116, 104)
  2896. // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
  2897. clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily
  2898. if (pad_to_square) {
  2899. // for llava-1.5, we resize image to a square, and pad the shorter side with a background color
  2900. // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
  2901. const int longer_side = std::max(img->nx, img->ny);
  2902. temp->nx = longer_side;
  2903. temp->ny = longer_side;
  2904. temp->buf.resize(3 * longer_side * longer_side);
  2905. // background color in RGB from LLaVA (this is the mean rgb color * 255)
  2906. const std::array<uint8_t, 3> pad_color = {122, 116, 104};
  2907. // resize the image to the target_size
  2908. image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);
  2909. clip_image_f32_ptr res(clip_image_f32_init());
  2910. normalize_image_u8_to_f32(*temp, *res, params.image_mean, params.image_std);
  2911. res_imgs->entries.push_back(std::move(res));
  2912. return true;
  2913. } else if (!params.image_res_candidates.empty()) {
  2914. // "spatial_unpad" with "anyres" processing for llava-1.6
  2915. auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
  2916. std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
  2917. for (size_t i = 0; i < imgs.size(); ++i) {
  2918. // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
  2919. clip_image_f32_ptr res(clip_image_f32_init());
  2920. normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
  2921. res_imgs->entries.push_back(std::move(res));
  2922. }
  2923. return true;
  2924. }
  2925. GGML_ASSERT(false && "Unknown image preprocessing type");
  2926. }
  2927. ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
  2928. return ctx->model.image_newline;
  2929. }
  2930. void clip_free(clip_ctx * ctx) {
  2931. if (ctx == nullptr) {
  2932. return;
  2933. }
  2934. delete ctx;
  2935. }
  2936. // deprecated
  2937. size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
  2938. const int32_t nx = ctx->model.hparams.image_size;
  2939. const int32_t ny = ctx->model.hparams.image_size;
  2940. return clip_embd_nbytes_by_img(ctx, nx, ny);
  2941. }
  2942. size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h) {
  2943. clip_image_f32 img;
  2944. img.nx = img_w;
  2945. img.ny = img_h;
  2946. return clip_n_output_tokens(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
  2947. }
  2948. int32_t clip_get_image_size(const struct clip_ctx * ctx) {
  2949. return ctx->model.hparams.image_size;
  2950. }
  2951. int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
  2952. return ctx->model.hparams.patch_size;
  2953. }
  2954. int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
  2955. return ctx->model.hparams.n_embd;
  2956. }
  2957. const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
  2958. return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
  2959. }
  2960. int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
  2961. const auto & params = ctx->model.hparams;
  2962. const int n_total = clip_n_output_tokens(ctx, img);
  2963. if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
  2964. return img->nx / (params.patch_size * 2) + (int)(img->nx % params.patch_size > 0);
  2965. }
  2966. return n_total;
  2967. }
  2968. int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
  2969. const auto & params = ctx->model.hparams;
  2970. if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
  2971. return img->ny / (params.patch_size * 2) + (int)(img->ny % params.patch_size > 0);
  2972. }
  2973. return 1;
  2974. }
  2975. int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
  2976. const auto & params = ctx->model.hparams;
  2977. // only for models using fixed size square images
  2978. int n_patches_sq = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
  2979. projector_type proj = ctx->proj_type();
  2980. switch (proj) {
  2981. case PROJECTOR_TYPE_MLP:
  2982. case PROJECTOR_TYPE_MLP_NORM:
  2983. {
  2984. // do nothing
  2985. } break;
  2986. case PROJECTOR_TYPE_LDP:
  2987. case PROJECTOR_TYPE_LDPV2:
  2988. case PROJECTOR_TYPE_GLM_EDGE:
  2989. {
  2990. n_patches_sq /= 4;
  2991. if (ctx->model.mm_glm_tok_boi) {
  2992. n_patches_sq += 2; // for BOI and EOI token embeddings
  2993. }
  2994. } break;
  2995. case PROJECTOR_TYPE_MINICPMV:
  2996. {
  2997. if (params.minicpmv_version == 2) {
  2998. n_patches_sq = 96;
  2999. } else if (params.minicpmv_version == 3) {
  3000. n_patches_sq = 64;
  3001. } else if (params.minicpmv_version == 4) {
  3002. n_patches_sq = 64;
  3003. } else {
  3004. GGML_ABORT("Unknown minicpmv version");
  3005. }
  3006. } break;
  3007. case PROJECTOR_TYPE_QWEN2VL:
  3008. case PROJECTOR_TYPE_QWEN25VL:
  3009. {
  3010. // dynamic size
  3011. int patch_size = params.patch_size * 2;
  3012. int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
  3013. int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
  3014. n_patches_sq = x_patch * y_patch;
  3015. } break;
  3016. case PROJECTOR_TYPE_GEMMA3:
  3017. {
  3018. int n_per_side = params.image_size / params.patch_size;
  3019. int n_per_side_2d_pool = n_per_side / params.proj_scale_factor;
  3020. n_patches_sq = n_per_side_2d_pool * n_per_side_2d_pool;
  3021. } break;
  3022. case PROJECTOR_TYPE_IDEFICS3:
  3023. case PROJECTOR_TYPE_INTERNVL:
  3024. {
  3025. // both W and H are divided by proj_scale_factor
  3026. n_patches_sq /= (params.proj_scale_factor * params.proj_scale_factor);
  3027. } break;
  3028. case PROJECTOR_TYPE_PIXTRAL:
  3029. {
  3030. // dynamic size
  3031. int n_merge = params.spatial_merge_size;
  3032. int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1);
  3033. int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1);
  3034. n_patches_sq = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
  3035. } break;
  3036. case PROJECTOR_TYPE_LLAMA4:
  3037. {
  3038. int scale_factor = ctx->model.hparams.proj_scale_factor;
  3039. n_patches_sq /= (scale_factor * scale_factor);
  3040. } break;
  3041. case PROJECTOR_TYPE_ULTRAVOX:
  3042. {
  3043. const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
  3044. const int n_len = CLIP_ALIGN(img->nx, proj_stack_factor);
  3045. n_patches_sq = n_len / proj_stack_factor / 2;
  3046. } break;
  3047. case PROJECTOR_TYPE_QWEN2A:
  3048. {
  3049. // divide by 2 because of whisper
  3050. // another divide by 2 because of nn.AvgPool1d(2, stride=2)
  3051. n_patches_sq = img->nx / 4;
  3052. } break;
  3053. default:
  3054. GGML_ABORT("unsupported projector type");
  3055. }
  3056. return n_patches_sq;
  3057. }
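// classic transformer sin/cos positional embedding, evaluated per grid cell:
// omega[i] = 1 / 10000^(i / (embed_dim/2))
// emb[h][w][i]               = sin(pos[h][w] * omega[i])
// emb[h][w][i + embed_dim/2] = cos(pos[h][w] * omega[i])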
  3058. static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
  3059. assert(embed_dim % 2 == 0);
  3060. int H = pos.size();
  3061. int W = pos[0].size();
  3062. std::vector<float> omega(embed_dim / 2);
  3063. for (int i = 0; i < embed_dim / 2; ++i) {
  3064. omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
  3065. }
  3066. std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
  3067. for (int h = 0; h < H; ++h) {
  3068. for (int w = 0; w < W; ++w) {
  3069. for (int d = 0; d < embed_dim / 2; ++d) {
  3070. float out_value = pos[h][w] * omega[d];
  3071. emb[h][w][d] = sin(out_value);
  3072. emb[h][w][d + embed_dim / 2] = cos(out_value);
  3073. }
  3074. }
  3075. }
  3076. return emb;
  3077. }
  3078. static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
  3079. assert(embed_dim % 2 == 0);
  3080. std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
  3081. std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)
  3082. int H = emb_h.size();
  3083. int W = emb_h[0].size();
  3084. std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
  3085. for (int h = 0; h < H; ++h) {
  3086. for (int w = 0; w < W; ++w) {
  3087. for (int d = 0; d < embed_dim / 2; ++d) {
  3088. emb[h][w][d] = emb_h[h][w][d];
  3089. emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
  3090. }
  3091. }
  3092. }
  3093. return emb;
  3094. }
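// build a meshgrid of row/column indices, embed rows and columns separately
// (each getting embed_dim/2 channels), then flatten to (H*W, embed_dim);
// note the flattened index used below is w*H + h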
  3095. static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
  3096. int grid_h_size = image_size.first;
  3097. int grid_w_size = image_size.second;
  3098. std::vector<float> grid_h(grid_h_size);
  3099. std::vector<float> grid_w(grid_w_size);
  3100. for (int i = 0; i < grid_h_size; ++i) {
  3101. grid_h[i] = static_cast<float>(i);
  3102. }
  3103. for (int i = 0; i < grid_w_size; ++i) {
  3104. grid_w[i] = static_cast<float>(i);
  3105. }
  3106. std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
  3107. for (int h = 0; h < grid_h_size; ++h) {
  3108. for (int w = 0; w < grid_w_size; ++w) {
  3109. grid[h][w] = grid_w[w];
  3110. }
  3111. }
  3112. std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
  3113. for (int h = 0; h < grid_h_size; ++h) {
  3114. for (int w = 0; w < grid_w_size; ++w) {
  3115. grid_2d[0][h][w] = grid_h[h];
  3116. grid_2d[1][h][w] = grid_w[w];
  3117. }
  3118. }
  3119. std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);
  3120. int H = image_size.first;
  3121. int W = image_size.second;
  3122. std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
  3123. for (int h = 0; h < H; ++h) {
  3124. for (int w = 0; w < W; ++w) {
  3125. pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
  3126. }
  3127. }
  3128. return pos_embed_2d;
  3129. }
  3130. bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
  3131. clip_image_f32_batch imgs;
  3132. clip_image_f32_ptr img_copy(clip_image_f32_init());
  3133. *img_copy = *img;
  3134. imgs.entries.push_back(std::move(img_copy));
  3135. return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
  3136. }
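// encode a batch (currently limited to size 1): build and allocate the graph,
// upload the raw pixel values (planar R,G,B layout) or the mel spectrogram,
// set the projector-specific position/index inputs, then run the graph on the
// backend scheduler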
  3137. bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
  3138. const clip_image_f32_batch & imgs = *imgs_c_ptr;
  3139. int batch_size = imgs.entries.size();
  3140. // TODO @ngxson : implement batch size > 1 as a loop
// we don't need true batching support because the cgraph is going to be big anyway
  3142. if (batch_size != 1) {
  3143. return false; // only support batch size of 1
  3144. }
  3145. // build the inference graph
  3146. ctx->debug_print_tensors.clear();
  3147. ggml_backend_sched_reset(ctx->sched.get());
  3148. ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
  3149. ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);
  3150. // set inputs
  3151. const auto & model = ctx->model;
  3152. const auto & hparams = model.hparams;
  3153. const int image_size_width = imgs.entries[0]->nx;
  3154. const int image_size_height = imgs.entries[0]->ny;
  3155. const int patch_size = hparams.patch_size;
  3156. const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
  3157. const int n_pos = num_patches + (model.class_embedding ? 1 : 0);
  3158. const int pos_w = image_size_width / patch_size;
  3159. const int pos_h = image_size_height / patch_size;
  3160. const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl
  3161. auto get_inp_tensor = [&gf](const char * name) {
  3162. ggml_tensor * inp = ggml_graph_get_tensor(gf, name);
  3163. if (inp == nullptr) {
  3164. GGML_ABORT("Failed to get tensor %s", name);
  3165. }
  3166. if (!(inp->flags & GGML_TENSOR_FLAG_INPUT)) {
  3167. GGML_ABORT("Tensor %s is not an input tensor", name);
  3168. }
  3169. return inp;
  3170. };
  3171. auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
  3172. ggml_tensor * cur = get_inp_tensor(name);
  3173. GGML_ASSERT(cur->type == GGML_TYPE_F32);
  3174. GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
  3175. ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
  3176. };
  3177. auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
  3178. ggml_tensor * cur = get_inp_tensor(name);
  3179. GGML_ASSERT(cur->type == GGML_TYPE_I32);
  3180. GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
  3181. ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
  3182. };
  3183. // set input pixel values
  3184. if (!imgs.is_audio) {
  3185. size_t nelem = 0;
  3186. for (const auto & img : imgs.entries) {
  3187. nelem += img->nx * img->ny * 3;
  3188. }
  3189. std::vector<float> inp_raw(nelem);
  3190. // layout of data (note: the channel dim is unrolled to better visualize the layout):
  3191. //
  3192. // ┌──W──┐
  3193. // │ H │ channel = R
  3194. // ├─────┤ │
  3195. // │ H │ channel = G
  3196. // ├─────┤ │
  3197. // │ H │ channel = B
  3198. // └─────┘ │
  3199. // ──────┘ x B
  3200. for (size_t i = 0; i < imgs.entries.size(); i++) {
  3201. const int nx = imgs.entries[i]->nx;
  3202. const int ny = imgs.entries[i]->ny;
  3203. const int n = nx * ny;
  3204. for (int b = 0; b < batch_size; b++) {
  3205. float * batch_entry = inp_raw.data() + b * (3*n);
  3206. for (int y = 0; y < ny; y++) {
  3207. for (int x = 0; x < nx; x++) {
  3208. size_t base_src = 3*(y * nx + x); // idx of the first channel
  3209. size_t base_dst = y * nx + x; // idx of the first channel
  3210. batch_entry[ base_dst] = imgs.entries[b]->buf[base_src ];
  3211. batch_entry[1*n + base_dst] = imgs.entries[b]->buf[base_src + 1];
  3212. batch_entry[2*n + base_dst] = imgs.entries[b]->buf[base_src + 2];
  3213. }
  3214. }
  3215. }
  3216. }
  3217. set_input_f32("inp_raw", inp_raw);
  3218. } else {
  3219. // audio input
  3220. GGML_ASSERT(imgs.entries.size() == 1);
  3221. const auto & mel_inp = imgs.entries[0];
  3222. const int n_step = mel_inp->nx;
  3223. const int n_mel = mel_inp->ny;
  3224. std::vector<float> inp_raw(n_step * n_mel);
  3225. std::memcpy(inp_raw.data(), mel_inp->buf.data(), n_step * n_mel * sizeof(float));
  3226. set_input_f32("inp_raw", inp_raw);
  3227. }
  3228. // set input per projector
  3229. switch (ctx->model.proj_type) {
  3230. case PROJECTOR_TYPE_MINICPMV:
  3231. {
// inspired by siglip:
  3233. // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
  3234. // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
  3235. std::vector<int32_t> positions(pos_h * pos_w);
  3236. int bucket_coords_h[1024];
  3237. int bucket_coords_w[1024];
  3238. for (int i = 0; i < pos_h; i++){
  3239. bucket_coords_h[i] = std::floor(70.0*i/pos_h);
  3240. }
  3241. for (int i = 0; i < pos_w; i++){
  3242. bucket_coords_w[i] = std::floor(70.0*i/pos_w);
  3243. }
  3244. for (int i = 0, id = 0; i < pos_h; i++){
  3245. for (int j = 0; j < pos_w; j++){
  3246. positions[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
  3247. }
  3248. }
  3249. set_input_i32("positions", positions);
// inspired by the resampler of Qwen-VL:
  3251. // -> https://huggingface.co/Qwen/Qwen-VL/tree/main
  3252. // -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
  3253. int embed_dim = clip_n_mmproj_embd(ctx);
  3254. // TODO @ngxson : this is very inefficient, can we do this using ggml_sin and ggml_cos?
  3255. auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));
  3256. std::vector<float> pos_embed(embed_dim * pos_w * pos_h);
  3257. for(int i = 0; i < pos_w * pos_h; ++i){
  3258. for(int j = 0; j < embed_dim; ++j){
  3259. pos_embed[i * embed_dim + j] = pos_embed_t[i][j];
  3260. }
  3261. }
  3262. set_input_f32("pos_embed", pos_embed);
  3263. } break;
  3264. case PROJECTOR_TYPE_QWEN2VL:
  3265. {
  3266. const int merge_ratio = 2;
  3267. const int pw = image_size_width / patch_size;
  3268. const int ph = image_size_height / patch_size;
  3269. std::vector<int> positions(n_pos * 4);
  3270. int ptr = 0;
  3271. for (int y = 0; y < ph; y += merge_ratio) {
  3272. for (int x = 0; x < pw; x += merge_ratio) {
  3273. for (int dy = 0; dy < 2; dy++) {
  3274. for (int dx = 0; dx < 2; dx++) {
  3275. positions[ ptr] = y + dy;
  3276. positions[ num_patches + ptr] = x + dx;
  3277. positions[2 * num_patches + ptr] = y + dy;
  3278. positions[3 * num_patches + ptr] = x + dx;
  3279. ptr++;
  3280. }
  3281. }
  3282. }
  3283. }
  3284. set_input_i32("positions", positions);
  3285. } break;
  3286. case PROJECTOR_TYPE_QWEN25VL:
  3287. {
// pw * ph = number of tokens output by the ViT after applying the patch merger
// ipw * iph = number of vision tokens processed inside the ViT
  3290. const int merge_ratio = 2;
  3291. const int pw = image_size_width / patch_size / merge_ratio;
  3292. const int ph = image_size_height / patch_size / merge_ratio;
  3293. const int ipw = image_size_width / patch_size;
  3294. const int iph = image_size_height / patch_size;
  3295. std::vector<int> idx (ph * pw);
  3296. std::vector<int> inv_idx(ph * pw);
  3297. if (use_window_attn) {
  3298. const int attn_window_size = 112;
  3299. const int grid_window = attn_window_size / patch_size / merge_ratio;
  3300. int dst = 0;
  3301. // [num_vision_tokens, num_vision_tokens] attention mask tensor
  3302. std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
  3303. int mask_row = 0;
  3304. for (int y = 0; y < ph; y += grid_window) {
  3305. for (int x = 0; x < pw; x += grid_window) {
  3306. const int win_h = std::min(grid_window, ph - y);
  3307. const int win_w = std::min(grid_window, pw - x);
  3308. const int dst_0 = dst;
// group all tokens belonging to the same window together (into a contiguous range)
  3310. for (int dy = 0; dy < win_h; dy++) {
  3311. for (int dx = 0; dx < win_w; dx++) {
  3312. const int src = (y + dy) * pw + (x + dx);
  3313. GGML_ASSERT(src < (int)idx.size());
  3314. GGML_ASSERT(dst < (int)inv_idx.size());
  3315. idx [src] = dst;
  3316. inv_idx[dst] = src;
  3317. dst++;
  3318. }
  3319. }
  3320. for (int r=0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
  3321. int row_offset = mask_row * (ipw * iph);
  3322. std::fill(
  3323. mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
  3324. mask.begin() + row_offset + (dst * merge_ratio * merge_ratio),
  3325. 0.0);
  3326. mask_row++;
  3327. }
  3328. }
  3329. }
  3330. set_input_i32("window_idx", idx);
  3331. set_input_i32("inv_window_idx", inv_idx);
  3332. set_input_f32("window_mask", mask);
  3333. } else {
  3334. for (int i = 0; i < ph * pw; i++) {
  3335. idx[i] = i;
  3336. }
  3337. }
  3338. const int mpow = merge_ratio * merge_ratio;
                std::vector<int> positions(n_pos * 4);
                int ptr = 0;
                for (int y = 0; y < iph; y += merge_ratio) {
                    for (int x = 0; x < ipw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                auto remap = idx[ptr / mpow];
                                remap = (remap * mpow) + (ptr % mpow);

                                positions[                  remap] = y + dy;
                                positions[    num_patches + remap] = x + dx;
                                positions[2 * num_patches + remap] = y + dy;
                                positions[3 * num_patches + remap] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }

                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
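                // row-major traversal: e.g. with n_patches_per_col = 4, token 6 gets
                // pos_h = 6 / 4 = 1 and pos_w = 6 % 4 = 2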
                std::vector<int> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i / n_patches_per_col;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i % n_patches_per_col;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);

                // The patches vector is used to get rows to index into the embeds with;
                // we should skip dim 0 only if we have CLS to avoid going out of bounds
                // when retrieving the rows.
                int patch_offset = model.class_embedding ? 1 : 0;
                std::vector<int32_t> patches(num_patches);
                for (int i = 0; i < num_patches; i++) {
                    patches[i] = i + patch_offset;
                }
                set_input_i32("patches", patches);
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_QWEN2A:
        case PROJECTOR_TYPE_ULTRAVOX:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(num_patches + 1, 0); // +1 for the [CLS] token
                // last pos is always kept 0, it's for CLS
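                // positions are 1-based for patches; e.g. a 3x3 patch grid gives
                // pos_h = {1,1,1,2,2,2,3,3,3,0} and pos_w = {1,2,3,1,2,3,1,2,3,0}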
                // dimension H
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i / n_patches_per_col) + 1;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i % n_patches_per_col) + 1;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        default:
            GGML_ABORT("Unknown projector type");
    }

    // ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
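    // set the number of threads on the CPU backend through the backend registry
    // (generic replacement for the old ggml_backend_cpu_set_n_threads call above)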
    ggml_backend_dev_t dev = ggml_backend_get_device(ctx->backend_cpu);
    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
    if (reg) {
        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (ggml_backend_set_n_threads_fn) {
            ggml_backend_set_n_threads_fn(ctx->backend_cpu, n_threads);
        }
    }

    auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
        return false;
    }

    // print debug nodes
    if (ctx->debug_graph) {
        LOG_INF("\n\n---\n\n");
        LOG_INF("\n\nDebug graph:\n\n");
        for (ggml_tensor * t : ctx->debug_print_tensors) {
            std::vector<uint8_t> data(ggml_nbytes(t));
            ggml_backend_tensor_get(t, data.data(), 0, ggml_nbytes(t));
            print_tensor_shape(t);
            print_tensor_data(t, data.data(), 3);
        }
    }

    // the last node is the embedding tensor
    ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // sanity check (only a batch size of 1 is supported for now)
    const int n_tokens_out          = embeddings->ne[1];
    const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
    if (n_tokens_out != expected_n_tokens_out) {
        LOG_ERR("%s: expected output %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
        GGML_ABORT("Invalid number of output tokens");
    }

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}
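
// Number of embedding dimensions produced by the multimodal projector
// (typically the n_embd of the paired text model), read from the shape of
// each projector's final weight/bias tensor.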
int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    const auto & hparams = ctx->model.hparams;
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_LDP:
            return ctx->model.mm_model_block_1_block_2_1_b->ne[0];
        case PROJECTOR_TYPE_LDPV2:
            return ctx->model.mm_model_peg_0_b->ne[0];
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_PIXTRAL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_MLP_NORM:
            return ctx->model.mm_3_b->ne[0];
        case PROJECTOR_TYPE_MINICPMV:
            if (hparams.minicpmv_version == 2) {
                return 4096;
            } else if (hparams.minicpmv_version == 3) {
                return 3584;
            } else if (hparams.minicpmv_version == 4) {
                return 3584;
            }
            GGML_ABORT("Unknown minicpmv version");
        case PROJECTOR_TYPE_GLM_EDGE:
            return ctx->model.mm_model_mlp_3_w->ne[1];
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            return ctx->model.mm_1_b->ne[0];
        case PROJECTOR_TYPE_GEMMA3:
            return ctx->model.mm_input_proj_w->ne[0];
        case PROJECTOR_TYPE_IDEFICS3:
            return ctx->model.projection->ne[1];
        case PROJECTOR_TYPE_ULTRAVOX:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_INTERNVL:
            return ctx->model.mm_3_w->ne[1];
        case PROJECTOR_TYPE_LLAMA4:
            return ctx->model.mm_model_proj->ne[1];
        case PROJECTOR_TYPE_QWEN2A:
            return ctx->model.mm_fc_w->ne[1];
        default:
            GGML_ABORT("Unknown projector type");
    }
}

int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) {
        return ctx->model.hparams.minicpmv_version;
    }
    return 0;
}

bool clip_is_glm(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE;
}

bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL;
}

bool clip_is_llava(const struct clip_ctx * ctx) {
    return ctx->model.hparams.has_llava_projector;
}

bool clip_is_gemma3(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GEMMA3;
}

bool clip_has_vision_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_VISION;
}

bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_AUDIO;
}

bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A;
}
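
// Encode a single pre-processed float image (row-major, h * w * 3 floats) by
// wrapping it in a clip_image_f32 and forwarding it to clip_image_encode.
// NOTE: the return value of clip_image_encode is not propagated here.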
bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
    clip_image_f32 clip_img;
    clip_img.buf.resize(h * w * 3);
    for (int i = 0; i < h * w * 3; i++) {
        clip_img.buf[i] = img[i];
    }
    clip_img.nx = w;
    clip_img.ny = h;
    clip_image_encode(ctx, n_threads, &clip_img, vec);
    return true;
}

//
// API used internally with mtmd
//

projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
    return ctx->proj_type();
}
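
// Append a mel spectrogram (n_mel bins x n_frames frames, stored as floats) to
// the batch as a new entry and mark the batch as audio (is_audio = true).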
void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
    clip_image_f32 * audio = new clip_image_f32;
    audio->nx = n_frames;
    audio->ny = n_mel;
    audio->buf.resize(n_frames * n_mel);
    std::memcpy(audio->buf.data(), mel, n_frames * n_mel * sizeof(float));

    batch->entries.push_back(clip_image_f32_ptr(audio));
    batch->is_audio = true;
}