// NOTE: This is modified from clip.cpp only for LLaVA,
// so there might still be unnecessary artifacts hanging around
// I'll gradually clean and extend it
// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to PyTorch
#include "clip.h"
#include "clip-impl.h"
#include "ggml.h"
#include "ggml-cpp.h"
#include "ggml-cpu.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "gguf.h"

#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <regex>
#include <stdexcept>
#include <unordered_set>
#include <vector>
#include <sstream>
#include <cinttypes>
#include <limits>
#include <array>
#include <numeric>
#include <functional>
struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};

enum ffn_op_type {
    FFN_GELU,
    FFN_GELU_ERF,
    FFN_SILU,
    FFN_GELU_QUICK,
};

enum norm_type {
    NORM_TYPE_NORMAL,
    NORM_TYPE_RMS,
};
//#define CLIP_DEBUG_FUNCTIONS

#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    // Write pixel data
    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
    }

    file.close();
}
static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;
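    // BMP rows are padded to a multiple of 4 bytes; e.g., for a 3-pixel-wide
    // image: widthInBytes = 9, paddingAmount = 3, stride = 12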
    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',     // Signature
        0,0,0,0,     // Image file size in bytes
        0,0,0,0,     // Reserved
        54,0,0,0     // Start of pixel array
    };

    // Total file size
    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0,    // Size of this header (40 bytes)
        0,0,0,0,     // Image width
        0,0,0,0,     // Image height
        1,0,         // Number of color planes
        24,0,        // Bits per pixel
        0,0,0,0,     // No compression
        0,0,0,0,     // Image size (can be 0 for no compression)
        0,0,0,0,     // X pixels per meter (not specified)
        0,0,0,0,     // Y pixels per meter (not specified)
        0,0,0,0,     // Total colors (color table not used)
        0,0,0,0      // Important colors (all are important)
    };

    // Width and height in the information header
    infoHeader[4]  = (unsigned char)(img.nx);
    infoHeader[5]  = (unsigned char)(img.nx >> 8);
    infoHeader[6]  = (unsigned char)(img.nx >> 16);
    infoHeader[7]  = (unsigned char)(img.nx >> 24);
    infoHeader[8]  = (unsigned char)(img.ny);
    infoHeader[9]  = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));

    // Pixel data
    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            // Each pixel
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
                img.buf[pixelIndex]
            };
            file.write(reinterpret_cast<char*>(pixel), 3);
        }
        // Write padding for the row
        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
    }

    file.close();
}
// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(3 * src.nx * src.ny);
    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
    }
}
#endif
//
// clip layers
//

enum patch_merge_type {
    PATCH_MERGE_FLAT,
    PATCH_MERGE_SPATIAL_UNPAD,
};

struct clip_hparams {
    bool has_vision = false;
    bool has_audio  = false;

    int32_t image_size;
    int32_t patch_size;
    int32_t n_embd;
    int32_t n_ff;
    int32_t projection_dim;
    int32_t n_head;
    int32_t n_layer;
    int32_t proj_scale_factor = 0; // idefics3

    // for models using dynamic image size, we need to have a smaller image size to warmup
    // otherwise, the user will get OOM every time they load the model
    int32_t warmup_image_size = 0;

    ffn_op_type ffn_op = FFN_GELU;

    patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;

    float eps = 1e-6;
    float rope_theta = 0.0;

    std::vector<int32_t> image_grid_pinpoints;
    int32_t image_crop_resolution;
    std::unordered_set<int32_t> vision_feature_layer;
    int32_t attn_window_size = 0;
    int32_t n_wa_pattern = 0;
    int32_t spatial_merge_size = 0;

    // audio
    int32_t n_mel_bins = 0; // whisper preprocessor
    int32_t proj_stack_factor = 0; // ultravox
};
struct clip_layer {
    // attention
    ggml_tensor * k_w = nullptr;
    ggml_tensor * k_b = nullptr;
    ggml_tensor * q_w = nullptr;
    ggml_tensor * q_b = nullptr;
    ggml_tensor * v_w = nullptr;
    ggml_tensor * v_b = nullptr;

    ggml_tensor * o_w = nullptr;
    ggml_tensor * o_b = nullptr;

    ggml_tensor * k_norm = nullptr;
    ggml_tensor * q_norm = nullptr;

    // layernorm 1
    ggml_tensor * ln_1_w = nullptr;
    ggml_tensor * ln_1_b = nullptr;

    ggml_tensor * ff_up_w = nullptr;
    ggml_tensor * ff_up_b = nullptr;
    ggml_tensor * ff_gate_w = nullptr;
    ggml_tensor * ff_gate_b = nullptr;
    ggml_tensor * ff_down_w = nullptr;
    ggml_tensor * ff_down_b = nullptr;

    // layernorm 2
    ggml_tensor * ln_2_w = nullptr;
    ggml_tensor * ln_2_b = nullptr;

    // layer scale (no bias)
    ggml_tensor * ls_1_w = nullptr;
    ggml_tensor * ls_2_w = nullptr;
};
struct clip_vision_model {
    struct clip_hparams hparams;

    // embeddings
    ggml_tensor * class_embedding = nullptr;
    ggml_tensor * patch_embeddings_0 = nullptr;
    ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along temporal dimension (Qwen2VL)
    ggml_tensor * patch_bias = nullptr;
    ggml_tensor * position_embeddings = nullptr;

    ggml_tensor * pre_ln_w = nullptr;
    ggml_tensor * pre_ln_b = nullptr;

    std::vector<clip_layer> layers;

    ggml_tensor * post_ln_w;
    ggml_tensor * post_ln_b;

    ggml_tensor * projection;

    // LLaVA projection
    ggml_tensor * mm_input_norm_w = nullptr;
    ggml_tensor * mm_0_w = nullptr;
    ggml_tensor * mm_0_b = nullptr;
    ggml_tensor * mm_2_w = nullptr;
    ggml_tensor * mm_2_b = nullptr;

    ggml_tensor * image_newline = nullptr;

    // Yi type models with mlp+normalization projection
    ggml_tensor * mm_1_w = nullptr; // Yi type models have 0, 1, 3, 4
    ggml_tensor * mm_1_b = nullptr;
    ggml_tensor * mm_3_w = nullptr;
    ggml_tensor * mm_3_b = nullptr;
    ggml_tensor * mm_4_w = nullptr;
    ggml_tensor * mm_4_b = nullptr;

    // GLMV-Edge projection
    ggml_tensor * mm_model_adapter_conv_w = nullptr;
    ggml_tensor * mm_model_adapter_conv_b = nullptr;
    ggml_tensor * mm_glm_tok_boi = nullptr;
    ggml_tensor * mm_glm_tok_eoi = nullptr;

    // MobileVLM projection
    ggml_tensor * mm_model_mlp_1_w = nullptr;
    ggml_tensor * mm_model_mlp_1_b = nullptr;
    ggml_tensor * mm_model_mlp_3_w = nullptr;
    ggml_tensor * mm_model_mlp_3_b = nullptr;
    ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;

    // MobileVLM_V2 projection
    ggml_tensor * mm_model_mlp_0_w = nullptr;
    ggml_tensor * mm_model_mlp_0_b = nullptr;
    ggml_tensor * mm_model_mlp_2_w = nullptr;
    ggml_tensor * mm_model_mlp_2_b = nullptr;
    ggml_tensor * mm_model_peg_0_w = nullptr;
    ggml_tensor * mm_model_peg_0_b = nullptr;

    // MINICPMV projection
    ggml_tensor * mm_model_pos_embed_k = nullptr;
    ggml_tensor * mm_model_query = nullptr;
    ggml_tensor * mm_model_proj = nullptr;
    ggml_tensor * mm_model_kv_proj = nullptr;
    ggml_tensor * mm_model_attn_q_w = nullptr;
    ggml_tensor * mm_model_attn_q_b = nullptr;
    ggml_tensor * mm_model_attn_k_w = nullptr;
    ggml_tensor * mm_model_attn_k_b = nullptr;
    ggml_tensor * mm_model_attn_v_w = nullptr;
    ggml_tensor * mm_model_attn_v_b = nullptr;
    ggml_tensor * mm_model_attn_o_w = nullptr;
    ggml_tensor * mm_model_attn_o_b = nullptr;
    ggml_tensor * mm_model_ln_q_w = nullptr;
    ggml_tensor * mm_model_ln_q_b = nullptr;
    ggml_tensor * mm_model_ln_kv_w = nullptr;
    ggml_tensor * mm_model_ln_kv_b = nullptr;
    ggml_tensor * mm_model_ln_post_w = nullptr;
    ggml_tensor * mm_model_ln_post_b = nullptr;

    // gemma3
    ggml_tensor * mm_input_proj_w = nullptr;
    ggml_tensor * mm_soft_emb_norm_w = nullptr;

    // pixtral
    ggml_tensor * token_embd_img_break = nullptr;
    ggml_tensor * mm_patch_merger_w = nullptr;

    // ultravox / whisper encoder
    ggml_tensor * conv1d_1_w = nullptr;
    ggml_tensor * conv1d_1_b = nullptr;
    ggml_tensor * conv1d_2_w = nullptr;
    ggml_tensor * conv1d_2_b = nullptr;
    ggml_tensor * mm_norm_pre_w = nullptr;
    ggml_tensor * mm_norm_mid_w = nullptr;
};
struct clip_ctx {
    bool has_llava_projector = false;
    int minicpmv_version = 0;

    struct clip_vision_model vision_model;
    projector_type proj_type = PROJECTOR_TYPE_MLP;

    float image_mean[3];
    float image_std[3];

    gguf_context_ptr ctx_gguf;
    ggml_context_ptr ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    std::vector<ggml_backend_t> backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    ggml_backend_t backend;
    ggml_backend_t backend_cpu;
    ggml_backend_buffer_ptr buf;

    int max_nodes = 8192;
    ggml_backend_sched_ptr sched;

    // for debugging
    bool debug_graph = false;
    std::vector<ggml_tensor *> debug_print_tensors;

    clip_ctx(clip_context_params & ctx_params) {
        debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr;
        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (!backend_cpu) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        backend = ctx_params.use_gpu
                    ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr)
                    : nullptr;

        if (backend) {
            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
            backend_ptrs.push_back(backend);
            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
        } else {
            backend = backend_cpu;
            LOG_INF("%s: CLIP using CPU backend\n", __func__);
        }

        backend_ptrs.push_back(backend_cpu);
        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));

        sched.reset(
            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
        );
    }

    ~clip_ctx() {
        ggml_backend_free(backend);
        if (backend != backend_cpu) {
            ggml_backend_free(backend_cpu);
        }
    }
};
struct clip_graph {
    clip_ctx * ctx;
    const clip_vision_model & model;
    const clip_hparams & hparams;

    // we only support single image per batch
    const clip_image_f32 & img;

    const int patch_size;
    const int n_patches_x;
    const int n_patches_y;
    const int n_patches;
    const int n_embd;
    const int n_head;
    const int d_head;
    const int n_layer;
    const float eps;
    const float kq_scale;

    ggml_context_ptr ctx0_ptr;
    ggml_context * ctx0;
    ggml_cgraph * gf;

    clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
            ctx(ctx),
            model(ctx->vision_model),
            hparams(model.hparams),
            img(img),
            patch_size(hparams.patch_size),
            n_patches_x(img.nx / patch_size),
            n_patches_y(img.ny / patch_size),
            n_patches(n_patches_x * n_patches_y),
            n_embd(hparams.n_embd),
            n_head(hparams.n_head),
            d_head(n_embd / n_head),
            n_layer(hparams.n_layer),
            eps(hparams.eps),
            kq_scale(1.0f / sqrtf((float)d_head)) {
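        // note: kq_scale = 1/sqrt(d_head) is the standard scaled-dot-product
        // attention factor, i.e. attn = softmax(Q*K^T / sqrt(d_head)) * V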
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx->buf_compute_meta.size(),
            /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
            /*.no_alloc   =*/ true,
        };
        ctx0_ptr.reset(ggml_init(params));
        ctx0 = ctx0_ptr.get();
        gf = ggml_new_graph_custom(ctx0, ctx->max_nodes, false);
    }
    ggml_cgraph * build_siglip() {
        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                                inp, n_patches,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                model.position_embeddings,
                                nullptr);

        if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
            const int batch_size = 1;

            GGML_ASSERT(n_patches_x == n_patches_y);
            const int patches_per_image = n_patches_x;
            const int kernel_size = hparams.proj_scale_factor;

            cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
            cur = ggml_reshape_4d(ctx0, cur, patches_per_image, patches_per_image, n_embd, batch_size);

            // doing a pool2d to reduce the number of output tokens
            cur = ggml_pool_2d(ctx0, cur, GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0);
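            // the non-overlapping average pool shrinks each side of the patch
            // grid by kernel_size, reducing the token count by kernel_size^2
            // (e.g., assuming a 64x64 grid and kernel_size = 4, the 4096
            // patches are pooled down to 16x16 = 256 output tokens)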
            cur = ggml_reshape_3d(ctx0, cur, cur->ne[0] * cur->ne[0], n_embd, batch_size);
            cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));

            // apply norm before projection
            cur = ggml_rms_norm(ctx0, cur, eps);
            cur = ggml_mul(ctx0, cur, model.mm_soft_emb_norm_w);

            // apply projection
            cur = ggml_mul_mat(ctx0,
                ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)),
                cur);

        } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3) {
            // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578

            const int scale_factor = model.hparams.proj_scale_factor;
            const int n_embd = cur->ne[0];
            const int seq    = cur->ne[1];
            const int bsz    = 1; // batch size, always 1 for now since we don't support batching
            const int height = std::sqrt(seq);
            const int width  = std::sqrt(seq);
            GGML_ASSERT(scale_factor != 0);
            cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height, bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                height / scale_factor,
                width / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_3d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                seq / (scale_factor * scale_factor),
                bsz);
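            // net effect of the pixel shuffle above: every scale_factor x
            // scale_factor block of patches is folded into the channel
            // dimension, trading sequence length for embedding width:
            // [n_embd, seq] -> [n_embd * scale_factor^2, seq / scale_factor^2]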
            cur = ggml_mul_mat(ctx0, model.projection, cur);
        } else {
            GGML_ABORT("SigLIP: Unsupported projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    ggml_cgraph * build_pixtral() {
        const int n_merge = hparams.spatial_merge_size;

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            return build_rope_2d(ctx0, cur, pos_h, pos_w, hparams.rope_theta, true);
        };

        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                                inp, n_patches,
                                NORM_TYPE_RMS,
                                hparams.ffn_op,
                                nullptr, // no learned pos embd
                                add_pos);

        // mistral small 3.1 patch merger
        // ref: https://github.com/huggingface/transformers/blob/7a3e208892c06a5e278144eaf38c8599a42f53e7/src/transformers/models/mistral3/modeling_mistral3.py#L67
        if (model.mm_patch_merger_w) {
            GGML_ASSERT(hparams.spatial_merge_size > 0);

            cur = ggml_mul(ctx0, ggml_rms_norm(ctx0, cur, eps), model.mm_input_norm_w);

            // reshape image tokens to 2D grid
            cur = ggml_reshape_3d(ctx0, cur, n_embd, n_patches_x, n_patches_y);
            cur = ggml_permute(ctx0, cur, 2, 0, 1, 3); // [x, y, n_embd]
            cur = ggml_cont(ctx0, cur);

            // torch.nn.functional.unfold is just an im2col under the hood
            // we just need a dummy kernel to make it work
            ggml_tensor * kernel = ggml_view_3d(ctx0, cur, n_merge, n_merge, cur->ne[2], 0, 0, 0);
            cur = ggml_im2col(ctx0, kernel, cur, n_merge, n_merge, 0, 0, 1, 1, true, inp->type);

            // project to n_embd
            cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
            cur = ggml_mul_mat(ctx0, model.mm_patch_merger_w, cur);
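            // with n_merge = 2, the im2col above gathers each non-overlapping
            // 2x2 patch neighborhood into one row of 4 * n_embd values, which
            // mm_patch_merger_w then projects back to n_embd; the token count
            // drops by a factor of n_merge^2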
        }

        // LlavaMultiModalProjector (always using GELU activation)
        {
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            if (model.mm_1_b) {
                cur = ggml_add(ctx0, cur, model.mm_1_b);
            }

            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            if (model.mm_2_b) {
                cur = ggml_add(ctx0, cur, model.mm_2_b);
            }
        }

        // arrangement of the [IMG_BREAK] token
        {
            // not efficient, but works
            // the trick is to view the embeddings as a 3D tensor with shape [n_embd, n_patches_per_row, n_rows]
            // and then concatenate the [IMG_BREAK] token to the end of each row, aka n_patches_per_row dimension
            // after the concatenation, we have a tensor with shape [n_embd, n_patches_per_row + 1, n_rows]

            const int p_y             = n_merge > 0 ? n_patches_y / n_merge : n_patches_y;
            const int p_x             = n_merge > 0 ? n_patches_x / n_merge : n_patches_x;
            const int p_total         = p_x * p_y;
            const int n_embd_text     = cur->ne[0];
            const int n_tokens_output = p_total + p_y - 1; // one [IMG_BREAK] per row, except the last row

            ggml_tensor * tmp = ggml_reshape_3d(ctx0, cur, n_embd_text, p_x, p_y);
            ggml_tensor * tok = ggml_new_tensor_3d(ctx0, tmp->type, n_embd_text, 1, p_y);
            tok = ggml_scale(ctx0, tok, 0.0); // clear the tensor
            tok = ggml_add(ctx0, tok, model.token_embd_img_break);
            tmp = ggml_concat(ctx0, tmp, tok, 1);
            cur = ggml_view_2d(ctx0, tmp,
                n_embd_text, n_tokens_output,
                ggml_row_size(tmp->type, n_embd_text), 0);
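            // worked example: with a 2x2 grid of merged tokens (p_x = p_y = 2),
            // the concat yields rows of p_x + 1 = 3 tokens; the flat 2D view
            // then keeps only the first p_total + p_y - 1 = 5 tokens, cutting
            // the trailing [IMG_BREAK] after the last row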
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }

    // Qwen2VL and Qwen2.5VL use M-RoPE
    ggml_cgraph * build_qwen2vl() {
        GGML_ASSERT(model.patch_bias == nullptr);
        GGML_ASSERT(model.class_embedding == nullptr);

        const int batch_size       = 1;
        const bool use_window_attn = hparams.n_wa_pattern > 0;
        const int n_wa_pattern     = hparams.n_wa_pattern;
        const int n_pos            = n_patches;
        const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position

        norm_type norm_t = ctx->proj_type == PROJECTOR_TYPE_QWEN25VL
            ? NORM_TYPE_RMS     // qwen 2.5 vl
            : NORM_TYPE_NORMAL; // qwen 2 vl

        int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
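        // M-RoPE: each position carries 4 components (hence num_position_ids
        // = n_pos * 4); mrope_sections assigns an equal d_head/4 slice of the
        // rotary dimensions to each component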
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

        GGML_ASSERT(img.nx % (patch_size * 2) == 0);
        GGML_ASSERT(img.ny % (patch_size * 2) == 0);

        // second conv dimension
        {
            auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
            inp = ggml_add(ctx0, inp, inp_1);

            inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 2, 0, 3)); // [w, h, c, b] -> [c, w, h, b]
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
            inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 0, 2, 1, 3));
            inp = ggml_reshape_3d(
                ctx0, inp,
                n_embd, n_patches_x * n_patches_y, batch_size);
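            // note: this reshape/permute sequence reorders the flattened
            // patches so that each 2x2 spatial block becomes contiguous in
            // the sequence (the asserts on img.nx/ny above guarantee even
            // patch grids), matching the 4-to-1 merge done by the projector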
        }

        ggml_tensor * inpL           = inp;
        ggml_tensor * window_mask    = nullptr;
        ggml_tensor * window_idx     = nullptr;
        ggml_tensor * inv_window_idx = nullptr;

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
        }

        if (use_window_attn) {
            // handle window attention inputs
            inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(inv_window_idx, "inv_window_idx");
            ggml_set_input(inv_window_idx);

            // mask for window attention
            window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_pos, n_pos);
            ggml_set_name(window_mask, "window_mask");
            ggml_set_input(window_mask);

            // inpL shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            inpL = ggml_reshape_2d(ctx0, inpL, n_embd * 4, n_patches_x * n_patches_y * batch_size / 4);
            inpL = ggml_get_rows(ctx0, inpL, inv_window_idx);
            inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_patches_x * n_patches_y, batch_size);
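            // note: inv_window_idx reorders whole 2x2 merge groups (rows of
            // n_embd * 4) so that patches in the same attention window are
            // contiguous; window_idx (applied after the projector below)
            // restores the original token order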
        }

        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            const bool full_attn = use_window_attn ? (il + 1) % n_wa_pattern == 0 : true;

            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "ln1", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.q_w, cur), layer.q_b);
                ggml_tensor * Kcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.k_w, cur), layer.k_b);
                ggml_tensor * Vcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.v_w, cur), layer.v_b);

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_patches);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_patches);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_patches);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                // apply M-RoPE
                Qcur = ggml_rope_multi(
                    ctx0, Qcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
                Kcur = ggml_rope_multi(
                    ctx0, Kcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);

                cb(Qcur, "Qcur_rope", il);
                cb(Kcur, "Kcur_rope", il);

                ggml_tensor * attn_mask = full_attn ? nullptr : window_mask;

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, attn_mask, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            // re-add the layer input, i.e., the residual
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states

            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);

            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
        }

        // multimodal projection
        ggml_tensor * embeddings = inpL;
        embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);
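        // each group of 4 adjacent patch embeddings is viewed as a single
        // n_embd * 4 vector, so the 2-layer MLP below maps n_pos tokens down
        // to n_pos / 4 projected tokens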
        embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

        // GELU activation
        embeddings = ggml_gelu(ctx0, embeddings);

        // Second linear layer
        embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);

        if (use_window_attn) {
            window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(window_idx, "window_idx");
            ggml_set_input(window_idx);

            // embeddings shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4);
            embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
            embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4, batch_size);
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_minicpmv() {
        const int batch_size = 1;

        GGML_ASSERT(model.class_embedding == nullptr);
        const int n_pos = n_patches;

        // position embeddings for the projector (not for ViT)
        int n_output_dim = clip_n_mmproj_embd(ctx);
        ggml_tensor * pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_output_dim, n_pos, batch_size);
        ggml_set_name(pos_embed, "pos_embed");
        ggml_set_input(pos_embed);

        // for selecting learned pos embd, used by ViT
        struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        ggml_tensor * learned_pos_embd = ggml_get_rows(ctx0, model.position_embeddings, positions);

        ggml_tensor * inp = build_inp();
        ggml_tensor * embeddings = build_vit(
                                inp, n_patches,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                learned_pos_embd,
                                nullptr);

        // resampler projector (it is just another transformer)

        ggml_tensor * q = model.mm_model_query;
        ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);

        // norm
        q = build_norm(q, model.mm_model_ln_q_w, model.mm_model_ln_q_b, NORM_TYPE_NORMAL, eps, -1);
        v = build_norm(v, model.mm_model_ln_kv_w, model.mm_model_ln_kv_b, NORM_TYPE_NORMAL, eps, -1);

        // k = v + pos_embed
        ggml_tensor * k = ggml_add(ctx0, v, pos_embed);

        // attention
        {
            int n_embd = clip_n_mmproj_embd(ctx);
            const int d_head = 128;
            int n_head = n_embd/d_head;
            int num_query = 96;
            if (ctx->minicpmv_version == 2) {
                num_query = 96;
            } else if (ctx->minicpmv_version == 3) {
                num_query = 64;
            } else if (ctx->minicpmv_version == 4) {
                num_query = 64;
            }

            ggml_tensor * Q = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q),
                model.mm_model_attn_q_b);
            ggml_tensor * K = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k),
                model.mm_model_attn_k_b);
            ggml_tensor * V = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v),
                model.mm_model_attn_v_b);

            Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_query);
            K = ggml_reshape_3d(ctx0, K, d_head, n_head, n_pos);
            V = ggml_reshape_3d(ctx0, V, d_head, n_head, n_pos);

            cb(Q, "resampler_Q", -1);
            cb(K, "resampler_K", -1);
            cb(V, "resampler_V", -1);

            embeddings = build_attn(
                model.mm_model_attn_o_w,
                model.mm_model_attn_o_b,
                Q, K, V, nullptr, kq_scale, -1);
            cb(embeddings, "resampler_attn_out", -1);
        }

        // layernorm
        embeddings = build_norm(embeddings, model.mm_model_ln_post_w, model.mm_model_ln_post_b, NORM_TYPE_NORMAL, eps, -1);

        // projection
        embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_internvl() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1;
        ggml_tensor * inp = build_inp();

        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // The larger models use a different ViT, which uses RMS norm instead of layer norm
        // ref: https://github.com/ggml-org/llama.cpp/pull/13443#issuecomment-2869786188
        norm_type norm_t = (hparams.n_embd == 3200 && hparams.n_layer == 45)
            ? NORM_TYPE_RMS     // 6B ViT (Used by InternVL 2.5/3 - 26B, 38B, 78B)
            : NORM_TYPE_NORMAL; // 300M ViT (Used by all smaller InternVL models)

        ggml_tensor * cur = build_vit(
                                inp, n_pos,
                                norm_t,
                                hparams.ffn_op,
                                model.position_embeddings,
                                nullptr);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
        {
            const int scale_factor = model.hparams.proj_scale_factor;
            const int bsz    = 1; // batch size, always 1 for now since we don't support batching
            const int height = n_patches_y;
            const int width  = n_patches_x;
            GGML_ASSERT(scale_factor > 0);
            cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, height / scale_factor, width, bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                height / scale_factor,
                width / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

            // flatten to 2D
            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                cur->ne[1] * cur->ne[2]);
        }

        // projector (always using GELU activation)
        {
            // projector LayerNorm uses pytorch's default eps = 1e-5
            // ref: https://huggingface.co/OpenGVLab/InternVL3-8B-Instruct/blob/a34d3e4e129a5856abfd6aa6de79776484caa14e/modeling_internvl_chat.py#L79
            cur = build_norm(cur, model.mm_0_w, model.mm_0_b, NORM_TYPE_NORMAL, 1e-5, -1);
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_1_b);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_3_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_3_b);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    ggml_cgraph * build_llama4() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1; // +1 for [CLS]

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        ggml_tensor * inp = build_inp_raw();

        // Llama4UnfoldConvolution
        {
            ggml_tensor * kernel = ggml_reshape_4d(ctx0, model.patch_embeddings_0,
                                                   patch_size, patch_size, 3, n_embd);
            inp = ggml_im2col(ctx0, kernel, inp, patch_size, patch_size, 0, 0, 1, 1, true, inp->type);
            inp = ggml_mul_mat(ctx0, model.patch_embeddings_0, inp);
            inp = ggml_reshape_2d(ctx0, inp, n_embd, n_patches);
            cb(inp, "patch_conv", -1);
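            // im2col + mul_mat with the flattened kernel is equivalent to a
            // Conv2D with stride = patch_size: each patch is unfolded into a
            // column and linearly projected to n_embd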
        }

        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // build ViT with 2D position embeddings
        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            // first half is X axis and second half is Y axis
            // ref: https://github.com/huggingface/transformers/blob/40a493c7ed4f19f08eadb0639cf26d49bfa5e180/src/transformers/models/llama4/modeling_llama4.py#L1312
            // ref: https://github.com/Blaizzy/mlx-vlm/blob/a57156aa87b33cca6e5ee6cfc14dd4ef8f611be6/mlx_vlm/models/llama4/vision.py#L441
            return build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
        };

        ggml_tensor * cur = build_vit(
                                inp, n_pos,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                model.position_embeddings,
                                add_pos);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
        // based on Llama4VisionPixelShuffleMLP
        // https://github.com/huggingface/transformers/blob/2932f318a20d9e54cc7aea052e040164d85de7d6/src/transformers/models/llama4/modeling_llama4.py#L1151
        {
            const int scale_factor = model.hparams.proj_scale_factor;
            const int bsz = 1; // batch size, always 1 for now since we don't support batching
            GGML_ASSERT(scale_factor > 0);
            GGML_ASSERT(n_patches_x == n_patches_y); // llama4 only supports square images
            cur = ggml_reshape_4d(ctx0, cur,
                n_embd * scale_factor,
                n_patches_x / scale_factor,
                n_patches_y,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                n_patches_x / scale_factor,
                n_patches_y / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

            // flatten to 2D
            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, cur),
                n_embd * scale_factor * scale_factor,
                n_patches / scale_factor / scale_factor);
            cb(cur, "pixel_shuffle", -1);
        }

        // based on Llama4VisionMLP2 (always uses GELU activation, no bias)
        {
            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, cur);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, cur);
            cur = ggml_gelu(ctx0, cur);
            cb(cur, "adapter_mlp", -1);
        }

        // Llama4MultiModalProjector
        cur = ggml_mul_mat(ctx0, model.mm_model_proj, cur);
        cb(cur, "projected", -1);

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    // this graph is used by llava, granite and glm
    // due to having embedding_stack (used by granite), we cannot reuse build_vit
    ggml_cgraph * build_llava() {
        const int batch_size = 1;
        const int n_pos = n_patches + (model.class_embedding ? 1 : 0);

        GGML_ASSERT(n_patches_x == n_patches_y && "only square images supported");

        // Calculate the deepest feature layer based on hparams and projector type
        int max_feature_layer = n_layer;
        {
            // Get the index of the second to last layer; this is the default for models that have a llava projector
            int il_last = hparams.n_layer - 1;
            int deepest_feature_layer = -1;

            if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
                il_last += 1;
            }

            // If we set explicit vision feature layers, only go up to the deepest one
            // NOTE: only used by granite-vision models for now
            for (const auto & feature_layer : hparams.vision_feature_layer) {
                if (feature_layer > deepest_feature_layer) {
                    deepest_feature_layer = feature_layer;
                }
            }
            max_feature_layer = deepest_feature_layer < 0 ? il_last : deepest_feature_layer;
        }
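        // example: with n_layer = 24 and no explicit vision feature layers,
        // max_feature_layer = 23, so the loop below runs layers 0..22 and the
        // output is taken from the second-to-last layer, as the llava
        // projector expects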
        ggml_tensor * inp = build_inp();

        // concat class_embeddings and patch_embeddings
        if (model.class_embedding) {
            inp = ggml_concat(ctx0, inp, model.class_embedding, 1);
        }

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        inp = ggml_add(ctx0, inp, ggml_get_rows(ctx0, model.position_embeddings, positions));

        ggml_tensor * inpL = inp;

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, NORM_TYPE_NORMAL, eps, -1);
            cb(inpL, "pre_ln", -1);
        }

        std::vector<ggml_tensor *> embedding_stack;
        const auto & vision_feature_layer = hparams.vision_feature_layer;

        // loop over layers
        for (int il = 0; il < max_feature_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // If this is an embedding feature layer, save the output.
            // NOTE: 0 index here refers to the input to the encoder.
            if (vision_feature_layer.find(il) != vision_feature_layer.end()) {
                embedding_stack.push_back(cur);
            }

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "layer_inp_normed", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }

                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }

                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            // re-add the layer input, i.e., the residual
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states

            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);

            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }
  965. // post-layernorm
  966. if (model.post_ln_w) {
  967. inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, NORM_TYPE_NORMAL, eps, -1);
  968. }
  969. ggml_tensor * embeddings = inpL;
  970. // process vision feature layers (used by granite)
  971. {
  972. // final layer is a vision feature layer
  973. if (vision_feature_layer.find(max_feature_layer) != vision_feature_layer.end()) {
  974. embedding_stack.push_back(inpL);
  975. }
  976. // If feature layers are explicitly set, stack them (if we have multiple)
  977. if (!embedding_stack.empty()) {
  978. embeddings = embedding_stack[0];
  979. for (size_t i = 1; i < embedding_stack.size(); i++) {
  980. embeddings = ggml_concat(ctx0, embeddings, embedding_stack[i], 0);
  981. }
  982. }
  983. }
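        // Worked example (hypothetical dims): with vision_feature_layer = {3, 7} and
        // n_embd = 1024, two [1024, n_pos] activations are concatenated along dim 0,
        // so the projector below sees [2048, n_pos] features per image.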
        // llava projector (also used by granite)
        if (ctx->has_llava_projector) {
            embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);

            ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
            ggml_set_name(patches, "patches");
            ggml_set_input(patches);

            // shape [1, 576, 1024]
            // ne is whcn, ne = [1024, 576, 1, 1]
            embeddings = ggml_get_rows(ctx0, embeddings, patches);

            // print_tensor_info(embeddings, "embeddings");

            // llava projector
            if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

                embeddings = ggml_gelu(ctx0, embeddings);
                if (model.mm_2_w) {
                    embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
                    embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
                }
            }
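            // Sketch of the data flow above (hypothetical llava-1.5-like dims): mm_0
            // maps the 1024-d vision features to the LLM embedding size (e.g. 4096),
            // then GELU -> mm_2 gives out = W2 * gelu(W0 * x + b0) + b2,
            // one token per selected patch.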
            else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
                // ggml_tensor_printf(embeddings, "mm_0_w", 0, true, false);

                // First LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),
                    model.mm_1_b);

                // GELU activation
                embeddings = ggml_gelu(ctx0, embeddings);

                // Second linear layer
                embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);

                // Second LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
                    model.mm_4_b);
            }
            else if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
                // MobileVLM projector
                int n_patch = 24;

                ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
                mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
                mlp_1 = ggml_gelu(ctx0, mlp_1);
                ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
                mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
                // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]

                // block 1
                ggml_tensor * block_1 = nullptr;
                {
                    // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
                    mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
                    mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
                    // stride = 1, padding = 1, bias is nullptr
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);

                    // layer norm
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));

                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);

                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);

                    // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));

                    // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);

                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]

                    // residual
                    block_1 = ggml_add(ctx0, mlp_3, block_1);
                }
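                // Note (sketch, inferred from the ops above): block 1 resembles a
                // MobileNetV3-style unit with squeeze-and-excitation: depthwise 3x3
                // conv -> LN -> hardswish, then global avg-pool -> fc1 -> relu ->
                // fc2 -> hardsigmoid produces per-channel gates that rescale
                // block_1_hw before the residual add.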
                // block_2
                {
                    // stride = 2
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);

                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
                    // layer norm
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));

                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);

                    // not sure the parameters are right for global avg pooling
                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);

                    // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));

                    // block_1 shape = [1, 12*12, 2048], ne = [12*12, 2048, 1]
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);

                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
                    block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
                    // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
                }
                embeddings = block_1;
            }
            else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2)
            {
                int n_patch = 24;
                ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
                mlp_0 = ggml_gelu(ctx0, mlp_0);
                ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
                mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
                // mlp_2 ne = [2048, 576, 1, 1]
                // AVG Pool Layer 2*2, strides = 2
                mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
                // mlp_2 ne = [576, 2048, 1, 1]
                mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
                // mlp_2 ne = [24, 24, 2048, 1]
                mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
                // weight ne = [3, 3, 2048, 1]
                ggml_tensor * peg_0 = ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
                peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
                mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, mlp_2);
                peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
                embeddings = peg_0;
            }
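            // Shape summary for LDPv2 (derived from the comments above): the 2x2,
            // stride-2 avg-pool reduces 24x24 = 576 patch tokens to 12x12 = 144, and
            // peg_0 is a PEG-style depthwise 3x3 positional-encoding conv whose output
            // is added back to its own input before flattening to ne = [2048, 144].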
            else {
                GGML_ABORT("fatal error");
            }
        }
        // glm projector
        else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
            size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
            embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
            embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
            embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
            embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0]*embeddings->ne[1], embeddings->ne[2], batch_size);
            embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
            embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);

            // GLU
            {
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
                embeddings = ggml_gelu_inplace(ctx0, embeddings);
                ggml_tensor * x = embeddings;
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
                x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, x);
                embeddings = ggml_silu_inplace(ctx0, embeddings);
                embeddings = ggml_mul(ctx0, embeddings, x);
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
            }
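            // In formula form (derived from the ops above): with u = gelu(LN(W0 * e)),
            // the GLU block computes W3 * (silu(W2 * u) * (W1 * u)) -- a SwiGLU-style
            // gated MLP over the downsampled patch embeddings.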
            // arrangement of BOI/EOI token embeddings
            // note: these embeddings are not present in the text model, hence we cannot process them as text tokens
            // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
            {
                embeddings = ggml_concat(ctx0, model.mm_glm_tok_boi, embeddings, 1); // BOI
                embeddings = ggml_concat(ctx0, embeddings, model.mm_glm_tok_eoi, 1); // EOI
            }
        }
        else {
            GGML_ABORT("llava: unknown projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }

    // whisper encoder with custom projector
    ggml_cgraph * build_whisper_enc() {
        const int n_frames = img.nx;
        const int n_pos = n_frames / 2;
        GGML_ASSERT(model.position_embeddings->ne[1] >= n_pos);

        ggml_tensor * inp = build_inp_raw(1);

        // conv1d block
        {
            // convolution + gelu
            ggml_tensor * cur = ggml_conv_1d_ph(ctx0, model.conv1d_1_w, inp, 1, 1);
            cur = ggml_add(ctx0, cur, model.conv1d_1_b);

            cur = ggml_gelu_erf(ctx0, cur);

            cur = ggml_conv_1d_ph(ctx0, model.conv1d_2_w, cur, 2, 1);
            cur = ggml_add(ctx0, cur, model.conv1d_2_b);

            cur = ggml_gelu_erf(ctx0, cur);

            // transpose
            inp = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
            cb(inp, "after_conv1d", -1);
        }

        // sanity check (only check one layer, but it should be the same for all)
        GGML_ASSERT(model.layers[0].ln_1_w && model.layers[0].ln_1_b);
        GGML_ASSERT(model.layers[0].ln_2_w && model.layers[0].ln_2_b);
        GGML_ASSERT(model.layers[0].q_b);
        GGML_ASSERT(model.layers[0].v_b);
        GGML_ASSERT(!model.layers[0].k_b); // no bias for k
        GGML_ASSERT(model.post_ln_w && model.post_ln_b);

        ggml_tensor * pos_embd_selected = ggml_view_2d(
            ctx0, model.position_embeddings,
            model.position_embeddings->ne[0], n_pos,
            model.position_embeddings->nb[1], 0
        );
        ggml_tensor * cur = build_vit(
            inp, n_pos,
            NORM_TYPE_NORMAL,
            hparams.ffn_op,
            pos_embd_selected,
            nullptr);

        cb(cur, "after_transformer", -1);

        // StackAudioFrames
        // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py
        {
            int64_t stride = n_embd * hparams.proj_stack_factor;
            int64_t padded_len = GGML_PAD(ggml_nelements(cur), stride);
            int64_t pad = padded_len - ggml_nelements(cur);
            if (pad > 0) {
                cur = ggml_view_1d(ctx0, cur, ggml_nelements(cur), 0);
                cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
            }
            cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
                ggml_row_size(cur->type, stride), 0);
        }
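        // Worked example (hypothetical Whisper-like dims assumed): with n_embd = 1280
        // and proj_stack_factor = 8, stride = 10240; 1500 output positions hold
        // 1500 * 1280 = 1,920,000 elements, padded up to 188 * 10240 = 1,925,120, so
        // the view above yields ne = [10240, 188] -- 8 consecutive frames per row.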
        cb(cur, "after_stacked", -1);

        // UltravoxProjector
        {
            // pre-norm
            cur = ggml_rms_norm(ctx0, cur, 1e-6);
            cur = ggml_mul(ctx0, cur, model.mm_norm_pre_w);

            // ffn in
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);

            // swiglu
            {
                int64_t split_point = cur->ne[0] / 2;
                ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0));
                ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur)));

                // see SwiGLU in ultravox_model.py: it is the second half that passes through silu, not the first
                x1 = ggml_silu(ctx0, x1);
                cur = ggml_mul(ctx0, x0, x1);
            }

            // mid-norm
            cur = ggml_rms_norm(ctx0, cur, 1e-6);
            cur = ggml_mul(ctx0, cur, model.mm_norm_mid_w);

            // ffn out
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
        }
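        // Equivalent formula (sketch, learned norm weights elided): with
        // [x0; x1] = W1 * rms_norm(stacked), the projector computes
        // out = W2 * rms_norm(x0 * silu(x1)) -- SwiGLU with the gate on the second half.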
        cb(cur, "projected", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }

private:
    //
    // utility functions
    //

    void cb(ggml_tensor * cur0, const char * name, int il) const {
        if (ctx->debug_graph) {
            ggml_tensor * cur = ggml_cpy(ctx0, cur0, ggml_dup_tensor(ctx0, cur0));
            std::string cur_name = il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
            ggml_set_name(cur, cur_name.c_str());
            ggml_set_output(cur);
            ggml_build_forward_expand(gf, cur);
            ctx->debug_print_tensors.push_back(cur);
        }
    }
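    // Usage note (derived from the function above): when ctx->debug_graph is set,
    // each cb() call copies the tensor into the graph as a named output, e.g.
    // cb(cur, "ffn_out", 3) registers a tensor named "ffn_out_3" in
    // ctx->debug_print_tensors for later inspection.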
    // build vision transformer (ViT) cgraph
    // this function should cover most of the models
    // if your model has specific features, you should probably duplicate this function
    ggml_tensor * build_vit(
            ggml_tensor * inp,
            int64_t n_pos,
            norm_type norm_t,
            ffn_op_type ffn_t,
            ggml_tensor * learned_pos_embd,
            std::function<ggml_tensor *(ggml_tensor *, const clip_layer &)> add_pos
        ) {
        if (learned_pos_embd) {
            inp = ggml_add(ctx0, inp, learned_pos_embd);
            cb(inp, "pos_embed", -1);
        }

        ggml_tensor * inpL = inp;

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
            cb(inpL, "pre_ln", -1);
        }

        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "layer_inp_normed", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }

                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }

                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                if (layer.q_norm) {
                    Qcur = build_norm(Qcur, layer.q_norm, NULL, norm_t, eps, il);
                    cb(Qcur, "Qcur_norm", il);
                }

                if (layer.k_norm) {
                    Kcur = build_norm(Kcur, layer.k_norm, NULL, norm_t, eps, il);
                    cb(Kcur, "Kcur_norm", il);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                if (add_pos) {
                    Qcur = add_pos(Qcur, layer);
                    Kcur = add_pos(Kcur, layer);
                    cb(Qcur, "Qcur_pos", il);
                    cb(Kcur, "Kcur_pos", il);
                }

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            if (layer.ls_1_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_1_w);
                cb(cur, "attn_out_scaled", il);
            }

            // re-add the layer input, i.e. the residual
            cur = ggml_add(ctx0, cur, inpL);
            inpL = cur; // inpL = residual, cur = hidden_states

            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                ffn_t, il);
            cb(cur, "ffn_out", il);

            if (layer.ls_2_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_2_w);
                cb(cur, "ffn_out_scaled", il);
            }

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
        }
        return inpL;
    }
    // build the input after conv2d (inp_raw --> patches)
    // returns tensor with shape [n_embd, n_patches]
    ggml_tensor * build_inp() {
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
        inp = ggml_reshape_2d(ctx0, inp, n_patches, n_embd);
        inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
        if (model.patch_bias) {
            inp = ggml_add(ctx0, inp, model.patch_bias);
            cb(inp, "patch_bias", -1);
        }
        return inp;
    }
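    // Worked example (hypothetical dims): a 336x336 image with patch_size = 14 gives
    // a 24x24 grid, so ggml_conv_2d produces ne = [24, 24, n_embd, 1]; the reshape +
    // transpose above flatten that to ne = [n_embd, 576], i.e. one n_embd-dim
    // embedding per patch.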
    ggml_tensor * build_inp_raw(int channels = 3) {
        ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, channels);
        ggml_set_name(inp_raw, "inp_raw");
        ggml_set_input(inp_raw);
        return inp_raw;
    }

    ggml_tensor * build_norm(
            ggml_tensor * cur,
            ggml_tensor * mw,
            ggml_tensor * mb,
            norm_type type,
            float norm_eps,
            int il) const {
        cur = type == NORM_TYPE_RMS
            ? ggml_rms_norm(ctx0, cur, norm_eps)
            : ggml_norm(ctx0, cur, norm_eps);

        if (mw || mb) {
            cb(cur, "norm", il);
        }

        if (mw) {
            cur = ggml_mul(ctx0, cur, mw);
            if (mb) {
                cb(cur, "norm_w", il);
            }
        }

        if (mb) {
            cur = ggml_add(ctx0, cur, mb);
        }

        return cur;
    }
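    // For reference (standard definitions, not specific to this file):
    //   layernorm: y = (x - mean(x)) / sqrt(var(x) + eps) * w + b
    //   rmsnorm:   y = x / sqrt(mean(x^2) + eps) * w
    // ggml_norm/ggml_rms_norm compute the normalization; the w/b affine is applied above.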
    ggml_tensor * build_ffn(
            ggml_tensor * cur,
            ggml_tensor * up,
            ggml_tensor * up_b,
            ggml_tensor * gate,
            ggml_tensor * gate_b,
            ggml_tensor * down,
            ggml_tensor * down_b,
            ffn_op_type type_op,
            int il) const {
        ggml_tensor * tmp = up ? ggml_mul_mat(ctx0, up, cur) : cur;
        cb(tmp, "ffn_up", il);

        if (up_b) {
            tmp = ggml_add(ctx0, tmp, up_b);
            cb(tmp, "ffn_up_b", il);
        }

        if (gate) {
            cur = ggml_mul_mat(ctx0, gate, cur);
            cb(cur, "ffn_gate", il);

            if (gate_b) {
                cur = ggml_add(ctx0, cur, gate_b);
                cb(cur, "ffn_gate_b", il);
            }
        } else {
            cur = tmp;
        }

        switch (type_op) {
            case FFN_SILU:
                {
                    cur = ggml_silu(ctx0, cur);
                    cb(cur, "ffn_silu", il);
                } break;
            case FFN_GELU:
                {
                    cur = ggml_gelu(ctx0, cur);
                    cb(cur, "ffn_gelu", il);
                } break;
            case FFN_GELU_ERF:
                {
                    cur = ggml_gelu_erf(ctx0, cur);
                    cb(cur, "ffn_gelu_erf", il);
                } break;
            case FFN_GELU_QUICK:
                {
                    cur = ggml_gelu_quick(ctx0, cur);
                    cb(cur, "ffn_gelu_quick", il);
                } break;
        }

        // we only support parallel ffn for now
        if (gate) {
            cur = ggml_mul(ctx0, cur, tmp);
            cb(cur, "ffn_gate_par", il);
        }

        if (down) {
            cur = ggml_mul_mat(ctx0, down, cur);
            cb(cur, "ffn_down", il);
        }

        if (down_b) {
            cur = ggml_add(ctx0, cur, down_b);
        }

        return cur;
    }
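    // Formula sketch (derived from the branches above): with a gate present and
    // type_op == FFN_SILU this is the usual SwiGLU FFN,
    //   out = W_down * (silu(W_gate * x) * (W_up * x)) + b_down,
    // and without a gate it degenerates to out = W_down * act(W_up * x + b_up) + b_down.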
    ggml_tensor * build_attn(
            ggml_tensor * wo,
            ggml_tensor * wo_b,
            ggml_tensor * q_cur,
            ggml_tensor * k_cur,
            ggml_tensor * v_cur,
            ggml_tensor * kq_mask,
            float kq_scale,
            int il) const {
        // these nodes are added to the graph together so that they are not reordered
        // by doing so, the number of splits in the graph is reduced
        ggml_build_forward_expand(gf, q_cur);
        ggml_build_forward_expand(gf, k_cur);
        ggml_build_forward_expand(gf, v_cur);

        ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
        //cb(q, "q", il);

        ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
        //cb(k, "k", il);

        ggml_tensor * v = ggml_permute(ctx0, v_cur, 1, 2, 0, 3);
        v = ggml_cont(ctx0, v);
        //cb(v, "v", il);

        ggml_tensor * cur;

        // TODO @ngxson : support flash attention
        {
            const auto n_tokens = q->ne[1];
            const auto n_head   = q->ne[2];
            // const auto n_kv  = k->ne[1]; // for flash attention

            ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);

            // F32 may not be needed for vision encoders?
            // ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

            kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, 0.0f);

            ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
            cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
            cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
        }

        cb(cur, "kqv_out", il);

        if (wo) {
            cur = ggml_mul_mat(ctx0, wo, cur);
        }

        if (wo_b) {
            cur = ggml_add(ctx0, cur, wo_b);
        }

        return cur;
    }
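    // Shape walkthrough (derived from the ops above; ggml ne order is fastest-first):
    // inputs arrive as [d_head, n_head, n_pos]; the permutes give
    // q, k = [d_head, n_pos, n_head] and v = [n_pos, d_head, n_head], so
    // kq = [n_pos, n_pos, n_head], kqv = [d_head, n_pos, n_head], and the final
    // cont_2d restores [d_head * n_head, n_tokens].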
    // implementation of the 2D RoPE without adding a new op in ggml
    // this is not efficient (it uses double the memory), but it works on all backends
    // TODO: there was a more efficient implementation which relied on ggml_view and
    // ggml_rope_ext_inplace, but rope inplace does not work well with non-contiguous
    // tensors; we should fix that and then revert to the original implementation in
    // https://github.com/ggml-org/llama.cpp/pull/13065
    static ggml_tensor * build_rope_2d(
        ggml_context * ctx0,
        ggml_tensor * cur,
        ggml_tensor * pos_a, // first half
        ggml_tensor * pos_b, // second half
        const float freq_base,
        const bool interleave_freq
    ) {
        const int64_t n_dim  = cur->ne[0];
        const int64_t n_head = cur->ne[1];
        const int64_t n_pos  = cur->ne[2];

        // for example, if we have cur tensor of shape (n_dim=8, n_head, n_pos)
        // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
        // first half of cur will use 1e-0, 1e-2 (even)
        // second half of cur will use 1e-1, 1e-3 (odd)
        // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
        // ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
        // then for the second half, we use freq_scale to shift the inv_freq
        // ^ why? replace (2i) with (2i+1) in the above equation
        const float freq_scale_odd = interleave_freq
            ? std::pow(freq_base, (float)-2/n_dim)
            : 1.0;
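        // Derivation (spelling out the comments above): standard RoPE frequency i is
        // theta_i = base^(-2i/n_dim). Rotating only n_dim/2 dims makes ggml compute
        // base^(-2i/(n_dim/2)) = base^(-2(2i)/n_dim), exactly the even frequencies;
        // multiplying the angle by freq_scale_odd = base^(-2/n_dim) turns
        // base^(-2(2i)/n_dim) into base^(-2(2i+1)/n_dim), the odd frequencies used by
        // the second half.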
        // first half
        ggml_tensor * first;
        {
            first = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                0);
            first = ggml_rope_ext(
                ctx0,
                first,
                pos_a,   // positions
                nullptr, // freq factors
                n_dim/2, // n_dims
                0, 0, freq_base,
                1.0f, 0.0f, 1.0f, 0.0f, 0.0f
            );
        }

        // second half
        ggml_tensor * second;
        {
            second = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                n_dim/2 * ggml_element_size(cur));
            second = ggml_cont(ctx0, second); // copy, because ggml_rope doesn't play well with non-contiguous tensors
            second = ggml_rope_ext(
                ctx0,
                second,
                pos_b,   // positions
                nullptr, // freq factors
                n_dim/2, // n_dims
                0, 0, freq_base,
                freq_scale_odd,
                0.0f, 1.0f, 0.0f, 0.0f
            );
        }

        cur = ggml_concat(ctx0, first, second, 0);
        return cur;
    }
};
static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
    GGML_ASSERT(imgs.entries.size() == 1 && "n_batch > 1 is not supported");
    clip_graph graph(ctx, *imgs.entries[0]);
    ggml_cgraph * res;
    switch (ctx->proj_type) {
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
            {
                res = graph.build_siglip();
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
            {
                res = graph.build_pixtral();
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            {
                res = graph.build_qwen2vl();
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                res = graph.build_minicpmv();
            } break;
        case PROJECTOR_TYPE_INTERNVL:
            {
                res = graph.build_internvl();
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                res = graph.build_llama4();
            } break;
        case PROJECTOR_TYPE_ULTRAVOX:
            {
                res = graph.build_whisper_enc();
            } break;
        default:
            {
                res = graph.build_llava();
            } break;
    }
    return res;
}
struct clip_model_loader {
    ggml_context_ptr ctx_meta;
    gguf_context_ptr ctx_gguf;

    clip_ctx & ctx_clip;
    std::string fname;

    size_t model_size = 0; // in bytes

    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_vision_model
    clip_model_loader(const char * fname, clip_ctx & ctx_clip) : ctx_clip(ctx_clip), fname(fname) {
        struct ggml_context * meta = nullptr;

        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &meta,
        };

        ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
        if (!ctx_gguf.get()) {
            throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
        }

        ctx_meta.reset(meta);

        const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());

        // print gguf info
        {
            std::string name;
            get_string(KEY_NAME, name, false);
            std::string description;
            get_string(KEY_DESCRIPTION, description, false);
            LOG_INF("%s: model name:   %s\n",  __func__, name.c_str());
            LOG_INF("%s: description:  %s\n",  __func__, description.c_str());
            LOG_INF("%s: GGUF version: %d\n",  __func__, gguf_get_version(ctx_gguf.get()));
            LOG_INF("%s: alignment:    %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
            LOG_INF("%s: n_tensors:    %d\n",  __func__, n_tensors);
            LOG_INF("%s: n_kv:         %d\n",  __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
            LOG_INF("\n");
        }

        // tensors
        {
            for (int i = 0; i < n_tensors; ++i) {
                const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
                const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
                enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
                ggml_tensor * cur = ggml_get_tensor(meta, name);
                size_t tensor_size = ggml_nbytes(cur);
                model_size += tensor_size;
                LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
                    __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }
    void load_hparams() {
        auto & hparams = ctx_clip.vision_model.hparams;
        std::string log_ffn_op; // for logging

        // projector type
        std::string proj_type;
        {
            get_string(KEY_PROJ_TYPE, proj_type, false);
            if (!proj_type.empty()) {
                ctx_clip.proj_type = clip_projector_type_from_string(proj_type);
            }
            if (ctx_clip.proj_type == PROJECTOR_TYPE_UNKNOWN) {
                throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
            }
        }

        // other hparams
        {
            get_bool(KEY_HAS_AUDIO_ENC,  hparams.has_audio,  false);
            get_bool(KEY_HAS_VISION_ENC, hparams.has_vision, false);

            const char * prefix = hparams.has_vision ? "vision" : "audio";
            get_u32(string_format(KEY_N_EMBD,         prefix), hparams.n_embd);
            get_u32(string_format(KEY_N_HEAD,         prefix), hparams.n_head);
            get_u32(string_format(KEY_N_FF,           prefix), hparams.n_ff);
            get_u32(string_format(KEY_N_BLOCK,        prefix), hparams.n_layer);
            get_u32(string_format(KEY_PROJ_DIM,       prefix), hparams.projection_dim);
            get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps);

            if (hparams.has_vision) {
                get_u32(KEY_IMAGE_SIZE, hparams.image_size);
                get_u32(KEY_PATCH_SIZE, hparams.patch_size);
                get_u32(KEY_IMAGE_CROP_RESOLUTION,    hparams.image_crop_resolution, false);
                get_arr_int(KEY_IMAGE_GRID_PINPOINTS, hparams.image_grid_pinpoints,  false);
                get_i32(KEY_MINICPMV_VERSION, ctx_clip.minicpmv_version, false); // legacy
            } else if (hparams.has_audio) {
                get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins);
            } else {
                throw std::runtime_error(string_format("%s: neither vision nor audio encoder is present\n", __func__));
            }

            // default warmup value
            hparams.warmup_image_size = hparams.image_size;

            ctx_clip.has_llava_projector = ctx_clip.proj_type == PROJECTOR_TYPE_MLP
                                        || ctx_clip.proj_type == PROJECTOR_TYPE_MLP_NORM
                                        || ctx_clip.proj_type == PROJECTOR_TYPE_LDP
                                        || ctx_clip.proj_type == PROJECTOR_TYPE_LDPV2;

            {
                bool use_gelu = false;
                bool use_silu = false;
                get_bool(KEY_USE_GELU, use_gelu, false);
                get_bool(KEY_USE_SILU, use_silu, false);
                if (use_gelu && use_silu) {
                    throw std::runtime_error(string_format("%s: both use_gelu and use_silu are set to true\n", __func__));
                }
                if (use_gelu) {
                    hparams.ffn_op = FFN_GELU;
                    log_ffn_op = "gelu";
                } else if (use_silu) {
                    hparams.ffn_op = FFN_SILU;
                    log_ffn_op = "silu";
                } else {
                    hparams.ffn_op = FFN_GELU_QUICK;
                    log_ffn_op = "gelu_quick";
                }
            }

            {
                std::string mm_patch_merge_type;
                get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
                if (mm_patch_merge_type == "spatial_unpad") {
                    hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
                }
            }

            if (hparams.has_vision) {
                int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
                int idx_std  = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
                GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
                GGML_ASSERT(idx_std >= 0  && "image_std not found");
                const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
                const float * std_data  = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
                for (int i = 0; i < 3; ++i) {
                    ctx_clip.image_mean[i] = mean_data[i];
                    ctx_clip.image_std[i]  = std_data[i];
                }
            }

            // Load the vision feature layer indices if they are explicitly provided;
            // if multiple vision feature layers are present, the values will be concatenated
            // to form the final visual features.
            // NOTE: gguf conversions should standardize the values of the vision feature layer to
            // be non-negative, since we use -1 to mark values as unset here.
            std::vector<int> vision_feature_layer;
            get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
            // convert std::vector to std::unordered_set
            for (auto & layer : vision_feature_layer) {
                hparams.vision_feature_layer.insert(layer);
            }

            // model-specific params
            switch (ctx_clip.proj_type) {
                case PROJECTOR_TYPE_MINICPMV:
                    {
                        if (ctx_clip.minicpmv_version == 0) {
                            ctx_clip.minicpmv_version = 2; // default to 2 if not set
                        }
                    } break;
                case PROJECTOR_TYPE_IDEFICS3:
                case PROJECTOR_TYPE_INTERNVL:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_PIXTRAL:
                    {
                        hparams.rope_theta = 10000.0f;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.spatial_merge_size, false);
                    } break;
                case PROJECTOR_TYPE_GEMMA3:
                    {
                        // default value (used by all model sizes in gemma 3 family)
                        // number of patches for each **side** is reduced by a factor of 4
                        hparams.proj_scale_factor = 4;
                        // test model (tinygemma3) has a different value, we optionally read it
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_QWEN2VL:
                    {
                        // max image size = sqrt(max_pixels) = 3584
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct/blob/main/preprocessor_config.json
                        // however, the model uses an unreasonable amount of memory past 1024 size, so we force it to 1024; otherwise it is unusable
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
                        hparams.image_size = 1024;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                    } break;
                case PROJECTOR_TYPE_QWEN25VL:
                    {
                        // max image size = sqrt(max_pixels)
                        // https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/blob/main/preprocessor_config.json
                        // however, the model uses an unreasonable amount of memory past 1024 size, so we force it to 1024; otherwise it is unusable
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
                        hparams.image_size = 1024;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern);
                    } break;
                case PROJECTOR_TYPE_LLAMA4:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor);

                        // borrowed from llava-1.6
                        const int isize = hparams.image_size;
                        hparams.image_grid_pinpoints = {
                            isize,   isize*2, // 336, 672
                            isize*2, isize,   // 672, 336
                            isize*2, isize*2, // 672, 672
                            isize*3, isize,   // 1008, 336
                            isize,   isize*3, // 336, 1008
                        };
                    } break;
                case PROJECTOR_TYPE_ULTRAVOX:
                    {
                        get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor);
                        if (hparams.n_mel_bins != 128) {
                            throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
                        }
                        hparams.ffn_op = FFN_GELU_ERF;
                        log_ffn_op = "gelu_erf"; // temporary solution for logging
                    } break;
                default:
                    break;
            }

            LOG_INF("%s: projector:          %s\n", __func__, proj_type.c_str());
            LOG_INF("%s: has_vision_encoder: %d\n", __func__, hparams.has_vision);
            LOG_INF("%s: has_audio_encoder:  %d\n", __func__, hparams.has_audio);
            LOG_INF("%s: n_embd:             %d\n", __func__, hparams.n_embd);
            LOG_INF("%s: n_head:             %d\n", __func__, hparams.n_head);
            LOG_INF("%s: n_ff:               %d\n", __func__, hparams.n_ff);
            LOG_INF("%s: n_layer:            %d\n", __func__, hparams.n_layer);
            LOG_INF("%s: ffn_op:             %s\n", __func__, log_ffn_op.c_str());
            LOG_INF("%s: projection_dim:     %d\n", __func__, hparams.projection_dim);
            LOG_INF("\n");
            if (hparams.has_vision) {
                LOG_INF("%s: image_size:         %d\n", __func__, hparams.image_size);
                LOG_INF("%s: patch_size:         %d\n", __func__, hparams.patch_size);
                LOG_INF("%s: has_llava_proj:     %d\n", __func__, ctx_clip.has_llava_projector);
                LOG_INF("%s: minicpmv_version:   %d\n", __func__, ctx_clip.minicpmv_version);
                LOG_INF("%s: proj_scale_factor:  %d\n", __func__, hparams.proj_scale_factor);
                LOG_INF("%s: n_wa_pattern:       %d\n", __func__, hparams.n_wa_pattern);
            } else if (hparams.has_audio) {
                LOG_INF("%s: n_mel_bins:         %d\n", __func__, hparams.n_mel_bins);
                LOG_INF("%s: proj_stack_factor:  %d\n", __func__, hparams.proj_stack_factor);
            }
            LOG_INF("\n");
            LOG_INF("%s: model size:         %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size:      %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
        }
    }
    void load_tensors() {
        auto & hparams = ctx_clip.vision_model.hparams;
        std::map<std::string, size_t> tensor_offset;
        std::vector<ggml_tensor *> tensors_to_load;

        // TODO @ngxson : support both audio and video in the future
        const char * prefix = hparams.has_audio ? "a" : "v";

        // get offsets
        for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
            const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
            tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
        }

        // create data context
        struct ggml_init_params params = {
            /*.mem_size   =*/ (gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ctx_clip.ctx_data.reset(ggml_init(params));
        if (!ctx_clip.ctx_data) {
            throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
        }

        // helper function
        auto get_tensor = [&](const std::string & name, bool required = true) {
            ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
            if (!cur && required) {
                throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
            }
            if (cur) {
                tensors_to_load.push_back(cur);
                // add tensors to context
                ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
                ggml_set_name(data_tensor, cur->name);
                cur = data_tensor;
            }
            return cur;
        };

        auto & vision_model = ctx_clip.vision_model;

        vision_model.class_embedding = get_tensor(TN_CLASS_EMBD, false);

        vision_model.pre_ln_w  = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false);
        vision_model.pre_ln_b  = get_tensor(string_format(TN_LN_PRE, prefix, "bias"),   false);

        vision_model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false);
        vision_model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"),   false);

        vision_model.patch_bias         = get_tensor(TN_PATCH_BIAS,   false);
        vision_model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD,   false);
        vision_model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);

        vision_model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false);

        // layers
        vision_model.layers.resize(hparams.n_layer);
        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = vision_model.layers[il];
            layer.k_w    = get_tensor(string_format(TN_ATTN_K,      prefix, il, "weight"));
            layer.q_w    = get_tensor(string_format(TN_ATTN_Q,      prefix, il, "weight"));
            layer.v_w    = get_tensor(string_format(TN_ATTN_V,      prefix, il, "weight"));
            layer.o_w    = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "weight"));
            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, prefix, il, "weight"), false);
            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, prefix, il, "weight"), false);
            layer.ln_1_w = get_tensor(string_format(TN_LN_1,        prefix, il, "weight"), false);
            layer.ln_2_w = get_tensor(string_format(TN_LN_2,        prefix, il, "weight"), false);
            layer.ls_1_w = get_tensor(string_format(TN_LS_1,        prefix, il, "weight"), false); // no bias
            layer.ls_2_w = get_tensor(string_format(TN_LS_2,        prefix, il, "weight"), false); // no bias
            layer.k_b    = get_tensor(string_format(TN_ATTN_K,      prefix, il, "bias"), false);
            layer.q_b    = get_tensor(string_format(TN_ATTN_Q,      prefix, il, "bias"), false);
            layer.v_b    = get_tensor(string_format(TN_ATTN_V,      prefix, il, "bias"), false);
            layer.o_b    = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "bias"), false);
            layer.ln_1_b = get_tensor(string_format(TN_LN_1,        prefix, il, "bias"), false);
            layer.ln_2_b = get_tensor(string_format(TN_LN_2,        prefix, il, "bias"), false);

            // ffn
            layer.ff_up_w   = get_tensor(string_format(TN_FFN_UP,   prefix, il, "weight"));
            layer.ff_up_b   = get_tensor(string_format(TN_FFN_UP,   prefix, il, "bias"),   false);
            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, prefix, il, "weight"), false);
            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, prefix, il, "bias"),   false);
            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "weight"));
            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "bias"),   false);

            // some models were exported with legacy (incorrect) naming, which is quite messy; fix it here
            // note: a Qwen model converted with the old surgery script has n_ff = 0, so we cannot use n_ff for this check!
            if (layer.ff_up_w && layer.ff_down_w && layer.ff_down_w->ne[0] == hparams.n_embd) {
                // swap up and down weights
                ggml_tensor * tmp = layer.ff_up_w;
                layer.ff_up_w = layer.ff_down_w;
                layer.ff_down_w = tmp;
                // swap up and down biases
                tmp = layer.ff_up_b;
                layer.ff_up_b = layer.ff_down_b;
                layer.ff_down_b = tmp;
            }
        }
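        // Why the swap works (note derived from ggml semantics): ggml_mul_mat(w, x)
        // contracts over w->ne[0], so a correct down-projection weight has
        // ne[0] == n_ff. If instead ff_down_w->ne[0] == n_embd, the tensor consumes
        // n_embd inputs -- i.e. it is really the up projection under a legacy name --
        // hence the weight/bias swap above.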
  1956. switch (ctx_clip.proj_type) {
  1957. case PROJECTOR_TYPE_MLP:
  1958. case PROJECTOR_TYPE_MLP_NORM:
  1959. {
  1960. // LLaVA projection
  1961. vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
  1962. vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
  1963. // Yi-type llava
  1964. vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
  1965. vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
  1966. // missing in Yi-type llava
  1967. vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
  1968. vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
  1969. // Yi-type llava
  1970. vision_model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
  1971. vision_model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
  1972. vision_model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
  1973. vision_model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
  1974. if (vision_model.mm_3_w) {
  1975. // TODO: this is a hack to support Yi-type llava
  1976. ctx_clip.proj_type = PROJECTOR_TYPE_MLP_NORM;
  1977. }
  1978. vision_model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
  1979. } break;
  1980. case PROJECTOR_TYPE_LDP:
  1981. {
  1982. // MobileVLM projection
  1983. vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
  1984. vision_model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
  1985. vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
  1986. vision_model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
  1987. vision_model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
  1988. vision_model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
  1989. vision_model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
  1990. vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
  1991. vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
  1992. vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
  1993. vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
  1994. vision_model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
  1995. vision_model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
  1996. vision_model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
  1997. vision_model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
  1998. vision_model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
  1999. vision_model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
  2000. vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
  2001. vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
  2002. vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
  2003. vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
  2004. vision_model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
  2005. vision_model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
  2006. vision_model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
  2007. } break;
  2008. case PROJECTOR_TYPE_LDPV2:
  2009. {
  2010. // MobilVLM_V2 projection
  2011. vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
  2012. vision_model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
  2013. vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
  2014. vision_model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
  2015. vision_model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
  2016. vision_model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
  2017. } break;
  2018. case PROJECTOR_TYPE_MINICPMV:
  2019. {
  2020. // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
  2021. vision_model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
  2022. vision_model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
  2023. vision_model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
  2024. vision_model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
  2025. vision_model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
  2026. vision_model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
  2027. vision_model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
  2028. vision_model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
  2029. vision_model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
  2030. vision_model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
  2031. vision_model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
  2032. vision_model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
  2033. vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
  2034. vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
  2035. vision_model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
  2036. vision_model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
  2037. vision_model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
  2038. vision_model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
  2039. } break;
  2040. case PROJECTOR_TYPE_GLM_EDGE:
  2041. {
  2042. vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
  2043. vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
  2044. vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
  2045. vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
  2046. vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
  2047. vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
  2048. vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
  2049. vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
  2050. vision_model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
  2051. vision_model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
  2052. } break;
  2053. case PROJECTOR_TYPE_QWEN2VL:
  2054. case PROJECTOR_TYPE_QWEN25VL:
  2055. {
  2056. vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
  2057. vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
  2058. vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
  2059. vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
  2060. } break;
  2061. case PROJECTOR_TYPE_GEMMA3:
  2062. {
  2063. vision_model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
  2064. vision_model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
  2065. } break;
  2066. case PROJECTOR_TYPE_IDEFICS3:
  2067. {
  2068. vision_model.projection = get_tensor(TN_MM_PROJECTOR);
  2069. } break;
  2070. case PROJECTOR_TYPE_PIXTRAL:
  2071. {
  2072. vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
  2073. vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
  2074. vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
  2075. vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
  2076. // [IMG_BREAK] token embedding
  2077. vision_model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
  2078. // for mistral small 3.1
  2079. vision_model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false);
  2080. vision_model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
  2081. } break;
  2082. case PROJECTOR_TYPE_ULTRAVOX:
  2083. {
  2084. vision_model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
  2085. vision_model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
  2086. vision_model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
  2087. vision_model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
  2088. vision_model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
  2089. vision_model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
  2090. vision_model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
  2091. vision_model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight"));
  2092. } break;
  2093. case PROJECTOR_TYPE_INTERNVL:
  2094. {
  2095. vision_model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
  2096. vision_model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
  2097. vision_model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
  2098. vision_model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
  2099. vision_model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
  2100. vision_model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
  2101. } break;
  2102. case PROJECTOR_TYPE_LLAMA4:
  2103. {
  2104. vision_model.mm_model_proj = get_tensor(TN_MM_PROJECTOR);
  2105. vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
  2106. vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
  2107. } break;
  2108. default:
  2109. GGML_ASSERT(false && "unknown projector type");
  2110. }
        // load data
        {
            std::vector<uint8_t> read_buf;

            auto fin = std::ifstream(fname, std::ios::binary);
            if (!fin) {
                throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
            }

            // alloc memory and offload data
            ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
            ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
            ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
            for (auto & t : tensors_to_load) {
                ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
                const size_t offset = tensor_offset[t->name];
                fin.seekg(offset, std::ios::beg);
                if (!fin) {
                    throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
                }
                size_t num_bytes = ggml_nbytes(cur);
                if (ggml_backend_buft_is_host(buft)) {
                    // for the CPU and Metal backend, we can read directly into the tensor
                    fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
                } else {
                    // read into a temporary buffer first, then copy to device memory
                    read_buf.resize(num_bytes);
                    fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
                    ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
                }
            }
            fin.close();

            LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
        }
    }
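
    // reserve worst-case compute buffers up front: building and reserving a warmup
    // graph lets each backend pre-allocate its compute buffer once, so the first
    // real encode does not have to grow buffers mid-flight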
    void alloc_compute_meta() {
        const auto & hparams = ctx_clip.vision_model.hparams;
        ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());

        // create a fake batch
        clip_image_f32_batch batch;
        clip_image_f32_ptr img(clip_image_f32_init());
        if (hparams.has_vision) {
            img->nx = hparams.warmup_image_size;
            img->ny = hparams.warmup_image_size;
        } else {
            img->nx = 1024; // TODO @ngxson : use a better default
            img->ny = hparams.n_mel_bins;
        }
        img->buf.resize(img->nx * img->ny * 3);
        batch.entries.push_back(std::move(img));

        ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
        ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);

        for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
            ggml_backend_t backend = ctx_clip.backend_ptrs[i];
            ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
            size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
            if (size > 1) {
                LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                        ggml_backend_buft_name(buft),
                        size / 1024.0 / 1024.0);
            }
        }
    }
    void get_bool(const std::string & key, bool & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_bool(ctx_gguf.get(), i);
    }

    void get_i32(const std::string & key, int & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_i32(ctx_gguf.get(), i);
    }

    void get_u32(const std::string & key, int & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_u32(ctx_gguf.get(), i);
    }

    void get_f32(const std::string & key, float & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_f32(ctx_gguf.get(), i);
    }

    void get_string(const std::string & key, std::string & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
    }
    void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        int n = gguf_get_arr_n(ctx_gguf.get(), i);
        output.resize(n);
        const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
        for (int j = 0; j < n; ++j) {
            output[j] = values[j];
        }
    }
};

struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params) {
    g_logger_state.verbosity_thold = ctx_params.verbosity;
    clip_ctx * ctx_clip = nullptr;

    try {
        ctx_clip = new clip_ctx(ctx_params);
        clip_model_loader loader(fname, *ctx_clip);
        loader.load_hparams();
        loader.load_tensors();
        loader.alloc_compute_meta();
    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
        delete ctx_clip;
        return nullptr;
    }

    return ctx_clip;
}
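
// Example (sketch, not part of the library): minimal caller-side use of clip_init()/clip_free().
// Only the 'verbosity' field is set here, since it is the only clip_context_params field this
// function reads directly; the model file name is a placeholder:
//
//     struct clip_context_params params{};
//     params.verbosity = GGML_LOG_LEVEL_INFO;
//     struct clip_ctx * ctx = clip_init("mmproj-model-f16.gguf", params);
//     if (ctx == nullptr) {
//         // loading failed; the error has already been logged
//         return 1;
//     }
//     // ... use the context ...
//     clip_free(ctx);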
struct clip_image_size * clip_image_size_init() {
    struct clip_image_size * load_image_size = new struct clip_image_size();
    load_image_size->width  = 448;
    load_image_size->height = 448;
    return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
    return new clip_image_u8();
}

struct clip_image_f32 * clip_image_f32_init() {
    return new clip_image_f32();
}

struct clip_image_f32_batch * clip_image_f32_batch_init() {
    return new clip_image_f32_batch();
}

unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
    if (nx) *nx = img->nx;
    if (ny) *ny = img->ny;
    return img->buf.data();
}

void clip_image_size_free(struct clip_image_size * load_image_size) {
    if (load_image_size == nullptr) {
        return;
    }
    delete load_image_size;
}
void clip_image_u8_free(struct clip_image_u8 * img) { if (img) delete img; }
void clip_image_f32_free(struct clip_image_f32 * img) { if (img) delete img; }
void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { if (batch) delete batch; }
void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { if (batch) delete batch; }

size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
    return batch->entries.size();
}

size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->nx;
}

size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->ny;
}

clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return nullptr;
    }
    return batch->entries[idx].get();
}

void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
    img->nx = nx;
    img->ny = ny;
    img->buf.resize(3 * nx * ny);
    memcpy(img->buf.data(), rgb_pixels, img->buf.size());
}

bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
    int nx, ny, nc;
    auto * data = stbi_load(fname, &nx, &ny, &nc, 3);
    if (!data) {
        LOG_ERR("%s: failed to load image '%s'\n", __func__, fname);
        return false;
    }
    clip_build_img_from_pixels(data, nx, ny, img);
    stbi_image_free(data);
    return true;
}

bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
    int nx, ny, nc;
    auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
    if (!data) {
        LOG_ERR("%s: failed to decode image bytes\n", __func__);
        return false;
    }
    clip_build_img_from_pixels(data, nx, ny, img);
    stbi_image_free(data);
    return true;
}
// Normalize image to float32.
// Note: when comparing against PyTorch, be careful with .to(model.device, dtype=torch.float16);
// the 32 -> 16 -> 32 round trip sometimes reduces precision and sometimes does not.
static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(src.buf.size());

    // TODO @ngxson : seems like this could be done more efficiently on cgraph
    for (size_t i = 0; i < src.buf.size(); ++i) {
        int c = i % 3; // rgb
        dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
    }
}
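
// Worked example for the formula above (illustrative values, not from any particular model):
// with mean[c] = 0.5f and std[c] = 0.5f, a u8 value of 255 maps to (255/255 - 0.5) / 0.5 = 1.0f
// and a value of 0 maps to -1.0f, i.e. the [0, 255] range is rescaled to [-1, 1].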
// set of tools to manipulate images
// in the future, we can have HW acceleration by allowing this struct to access 3rd party libs like imagick or opencv
struct image_manipulation {
    // Bilinear resize function
    static void bilinear_resize(const clip_image_u8 & src, clip_image_u8 & dst, int target_width, int target_height) {
        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float x_ratio = static_cast<float>(src.nx - 1) / target_width;
        float y_ratio = static_cast<float>(src.ny - 1) / target_height;

        for (int y = 0; y < target_height; y++) {
            for (int x = 0; x < target_width; x++) {
                float px = x_ratio * x;
                float py = y_ratio * y;
                int x_floor = static_cast<int>(px);
                int y_floor = static_cast<int>(py);
                float x_lerp = px - x_floor;
                float y_lerp = py - y_floor;

                for (int c = 0; c < 3; c++) {
                    float top = lerp(
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    float bottom = lerp(
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
                }
            }
        }
    }
    // Bicubic resize function
    // part of the image will be cropped if the aspect ratio is different
    static bool bicubic_resize(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
        const int nx = img.nx;
        const int ny = img.ny;

        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float Cc;
        float C[5];
        float d0, d2, d3, a0, a1, a2, a3;
        int i, j, k, jj;
        int x, y;
        float dx, dy;
        float tx, ty;

        tx = (float)nx / (float)target_width;
        ty = (float)ny / (float)target_height;

        // Bicubic interpolation; adapted from ViT.cpp, inspired from :
        //    -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
        //    -> https://en.wikipedia.org/wiki/Bicubic_interpolation

        for (i = 0; i < target_height; i++) {
            for (j = 0; j < target_width; j++) {
                x = (int)(tx * j);
                y = (int)(ty * i);

                dx = tx * j - x;
                dy = ty * i - y;

                for (k = 0; k < 3; k++) {
                    // interpolate horizontally across the 4 neighboring rows
                    for (jj = 0; jj <= 3; jj++) {
                        d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];

                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                        C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
                    }

                    // then interpolate vertically across the 4 row results
                    d0 = C[0] - C[1];
                    d2 = C[2] - C[1];
                    d3 = C[3] - C[1];

                    a0 = C[1];
                    a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                    a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                    a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
                    Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;

                    const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
                    dst.buf[(i * target_width + j) * 3 + k] = Cc2;
                }
            }
        }

        return true;
    }
    // llava-1.6 type of resize_and_pad
    // if the ratio is not 1:1, padding with pad_color will be applied
    // pad_color is an RGB triple; the default is {0, 0, 0} (black)
    static void resize_and_pad_image(const clip_image_u8 & image, clip_image_u8 & dst, const clip_image_size & target_resolution, std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
        int target_width  = target_resolution.width;
        int target_height = target_resolution.height;

        float scale_w = static_cast<float>(target_width) / image.nx;
        float scale_h = static_cast<float>(target_height) / image.ny;

        int new_width, new_height;

        if (scale_w < scale_h) {
            new_width  = target_width;
            new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
        } else {
            new_height = target_height;
            new_width  = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
        }

        clip_image_u8 resized_image;
        bicubic_resize(image, resized_image, new_width, new_height);

        clip_image_u8 padded_image;
        padded_image.nx = target_width;
        padded_image.ny = target_height;
        padded_image.buf.resize(3 * target_width * target_height);

        // Fill the padded image with the fill color
        for (size_t i = 0; i < padded_image.buf.size(); i += 3) {
            padded_image.buf[i]     = pad_color[0];
            padded_image.buf[i + 1] = pad_color[1];
            padded_image.buf[i + 2] = pad_color[2];
        }

        // Calculate padding offsets
        int pad_x = (target_width  - new_width)  / 2;
        int pad_y = (target_height - new_height) / 2;

        // Copy the resized image into the center of the padded buffer
        for (int y = 0; y < new_height; ++y) {
            for (int x = 0; x < new_width; ++x) {
                for (int c = 0; c < 3; ++c) {
                    padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
                }
            }
        }
        dst = std::move(padded_image);
    }
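
    // Worked example: resizing a 640x480 input to a 336x336 target.
    // scale_w = 336/640 = 0.525 and scale_h = 336/480 = 0.7, so the width-limited scale wins:
    // the image is resized to 336x252 (aspect ratio preserved) and pad_y = (336 - 252) / 2 = 42
    // rows of pad_color are added above and below the centered image.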
    static void crop_image(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
        dst.nx = w;
        dst.ny = h;
        dst.buf.resize(3 * w * h);

        for (int i = 0; i < h; ++i) {
            for (int j = 0; j < w; ++j) {
                int src_idx = 3 * ((y + i) * image.nx + (x + j));
                int dst_idx = 3 * (i * w + j);
                dst.buf[dst_idx]     = image.buf[src_idx];
                dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
            }
        }
    }
    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size will be aligned to the nearest multiple of align_size
    // if the H or W dimension is larger than max_dimension, the image is scaled down to fit
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int max_dimension) {
        if (inp_size.width <= 0 || inp_size.height <= 0 || align_size <= 0 || max_dimension <= 0) {
            return {0, 0};
        }

        float scale = std::min(1.0f, std::min(static_cast<float>(max_dimension) / inp_size.width,
                                              static_cast<float>(max_dimension) / inp_size.height));

        float target_width_f  = static_cast<float>(inp_size.width)  * scale;
        float target_height_f = static_cast<float>(inp_size.height) * scale;

        int aligned_width  = CLIP_ALIGN((int)target_width_f,  align_size);
        int aligned_height = CLIP_ALIGN((int)target_height_f, align_size);

        return {aligned_width, aligned_height};
    }
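
    // Worked example (assuming CLIP_ALIGN rounds up to the next multiple of align_size):
    // an 800x600 input with align_size = 28 and max_dimension = 1024 already fits, so
    // scale = 1.0 and the result is the aligned size 812x616 (800 -> 29 * 28, 600 -> 22 * 28).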
private:
    static inline int clip(int x, int lower, int upper) {
        return std::max(lower, std::min(x, upper));
    }

    // Linear interpolation between two points
    static inline float lerp(float s, float e, float t) {
        return s + (e - s) * t;
    }
};
/**
 * implementation of LLaVA-UHD:
 *  - https://arxiv.org/pdf/2403.11703
 *  - https://github.com/thunlp/LLaVA-UHD
 *  - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
 *
 * overview:
 *   - an image always has a single overview (downscaled image)
 *   - an image can have 0 or multiple slices, depending on the image size
 *   - each slice can then be considered as a separate image
 *
 * for example:
 *
 *   [overview] --> [slice 1] --> [slice 2]
 *                      |             |
 *                      +--> [slice 3] --> [slice 4]
 */
struct llava_uhd {
    struct slice_coordinates {
        int x;
        int y;
        clip_image_size size;
    };

    struct slice_instructions {
        clip_image_size overview_size; // size of downscaled image
        clip_image_size refined_size;  // size of image right before slicing (must be multiple of slice size)
        clip_image_size grid_size;     // grid_size.width * grid_size.height = number of slices
        std::vector<slice_coordinates> slices;
        bool padding_refined = false;  // if true, refine image will be padded to the grid size (e.g. llava-1.6)
    };

    static int get_max_slices(struct clip_ctx * ctx) {
        if (clip_is_minicpmv(ctx)) {
            return 9;
        }
        return 0;
    }
    static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
        slice_instructions res;
        const int patch_size     = clip_get_patch_size(ctx);
        const int slice_size     = clip_get_image_size(ctx);
        const int max_slice_nums = get_max_slices(ctx);
        const int original_width  = original_size.width;
        const int original_height = original_size.height;

        const float log_ratio = log((float)original_width / original_height);
        const float ratio     = (float)original_width * original_height / (slice_size * slice_size);
        const int   multiple  = fmin(ceil(ratio), max_slice_nums);
        const bool  has_slices    = (multiple > 1);
        const bool  has_pinpoints = !ctx->vision_model.hparams.image_grid_pinpoints.empty();

        if (has_pinpoints) {
            // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
            auto refine_size = llava_uhd::select_best_resolution(
                ctx->vision_model.hparams.image_grid_pinpoints,
                original_size);
            res.overview_size   = clip_image_size{slice_size, slice_size};
            res.refined_size    = refine_size;
            res.grid_size       = clip_image_size{0, 0};
            res.padding_refined = true;

            for (int y = 0; y < refine_size.height; y += slice_size) {
                for (int x = 0; x < refine_size.width; x += slice_size) {
                    slice_coordinates slice;
                    slice.x = x;
                    slice.y = y;
                    slice.size.width  = std::min(slice_size, refine_size.width  - x);
                    slice.size.height = std::min(slice_size, refine_size.height - y);
                    res.slices.push_back(slice);
                    if (y == 0) {
                        // count the columns only once, on the first row, so that
                        // grid_size.width * grid_size.height == number of slices
                        res.grid_size.width++;
                    }
                }
                res.grid_size.height++;
            }

            return res;
        }

        // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)

        auto best_size    = get_best_resize(original_size, slice_size, patch_size, !has_slices);
        res.overview_size = best_size;

        if (!has_slices) {
            // skip slicing logic
            res.refined_size = clip_image_size{0, 0};
            res.grid_size    = clip_image_size{0, 0};

        } else {
            auto best_grid   = get_best_grid(max_slice_nums, multiple, log_ratio);
            auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
            res.grid_size    = best_grid;
            res.refined_size = refine_size;

            int width  = refine_size.width;
            int height = refine_size.height;
            int grid_x = int(width  / best_grid.width);
            int grid_y = int(height / best_grid.height);

            for (int patches_y = 0, ic = 0;
                    patches_y < refine_size.height && ic < best_grid.height;
                    patches_y += grid_y, ic += 1) {
                for (int patches_x = 0, jc = 0;
                        patches_x < refine_size.width && jc < best_grid.width;
                        patches_x += grid_x, jc += 1) {
                    slice_coordinates slice;
                    slice.x = patches_x;
                    slice.y = patches_y;
                    slice.size.width  = grid_x;
                    slice.size.height = grid_y;
                    res.slices.push_back(slice);
                    // LOG_INF("slice %d: %d %d %d %d\n", ic, patches_i, patches_j, grid_x, grid_y);
                }
            }
        }

        return res;
    }
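
    // Worked example for the dynamic (no-pinpoint) path: a 1200x800 image with
    // slice_size = 448 and max_slice_nums = 9 gives
    //   ratio = 1200 * 800 / (448 * 448) ~= 4.78  ->  multiple = min(ceil(4.78), 9) = 5,
    // so the image is sliced. log_ratio = ln(1200/800) ~= 0.405, and among the candidate
    // grids for {4, 5, 6} slices, the 3x2 grid minimizes |log_ratio - ln(w/h)| since
    // ln(3/2) ~= 0.405, so get_best_grid() returns {3, 2}.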
    static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
        std::vector<clip_image_u8_ptr> output;

        // resize to overview size
        clip_image_u8_ptr resized_img(clip_image_u8_init());
        image_manipulation::bicubic_resize(*img, *resized_img, inst.overview_size.width, inst.overview_size.height);
        output.push_back(std::move(resized_img));
        if (inst.slices.empty()) {
            // no slices, just return the resized image
            return output;
        }

        // resize to refined size
        clip_image_u8_ptr refined_img(clip_image_u8_init());
        if (inst.padding_refined) {
            image_manipulation::resize_and_pad_image(*img, *refined_img, inst.refined_size);
        } else {
            image_manipulation::bilinear_resize(*img, *refined_img, inst.refined_size.width, inst.refined_size.height);
        }

        // create slices
        for (const auto & slice : inst.slices) {
            int x = slice.x;
            int y = slice.y;
            int w = slice.size.width;
            int h = slice.size.height;

            clip_image_u8_ptr img_slice(clip_image_u8_init());
            image_manipulation::crop_image(*refined_img, *img_slice, x, y, w, h);
            output.push_back(std::move(img_slice));
        }

        return output;
    }
private:
    static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
            float r = static_cast<float>(width) / height;
            height  = static_cast<int>(scale_resolution / std::sqrt(r));
            width   = static_cast<int>(height * r);
        }
        clip_image_size res;
        res.width  = ensure_divide(width,  patch_size);
        res.height = ensure_divide(height, patch_size);
        return res;
    }
    /**
     * Selects the best resolution from a list of possible resolutions based on the original size.
     *
     * @param original_size The original size of the image
     * @param possible_resolutions A list of possible resolutions
     * @return The best fit resolution
     */
    static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
        int original_width  = original_size.width;
        int original_height = original_size.height;
        clip_image_size best_fit;
        int max_effective_resolution = 0;
        int min_wasted_resolution    = std::numeric_limits<int>::max();

        for (const auto & resolution : possible_resolutions) {
            int width  = resolution.width;
            int height = resolution.height;
            float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
            int downscaled_width  = static_cast<int>(original_width  * scale);
            int downscaled_height = static_cast<int>(original_height * scale);
            int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
            int wasted_resolution = (width * height) - effective_resolution;
            // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
            if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
                max_effective_resolution = effective_resolution;
                min_wasted_resolution    = wasted_resolution;
                best_fit = resolution;
            }
        }

        return best_fit;
    }

    // used by llava 1.6 with custom list of pinpoints
    static clip_image_size select_best_resolution(const std::vector<int32_t> & pinpoints, const clip_image_size & original_size) {
        std::vector<clip_image_size> possible_resolutions; // TODO @ngxson : construct this inside hparams, not here
        for (size_t i = 0; i < pinpoints.size(); i += 2) {
            possible_resolutions.push_back(clip_image_size{pinpoints[i], pinpoints[i+1]});
        }
        return select_best_resolution(original_size, possible_resolutions);
    }
    static int ensure_divide(int length, int patch_size) {
        return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
    }

    static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        int grid_x = grid.width;
        int grid_y = grid.height;

        int refine_width  = ensure_divide(width,  grid_x);
        int refine_height = ensure_divide(height, grid_y);

        clip_image_size grid_size;
        grid_size.width  = refine_width  / grid_x;
        grid_size.height = refine_height / grid_y;

        auto best_grid_size  = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
        int best_grid_width  = best_grid_size.width;
        int best_grid_height = best_grid_size.height;

        clip_image_size refine_size;
        refine_size.width  = best_grid_width  * grid_x;
        refine_size.height = best_grid_height * grid_y;
        return refine_size;
    }
    static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
        std::vector<int> candidate_split_grids_nums;
        for (int i : {multiple - 1, multiple, multiple + 1}) {
            if (i == 1 || i > max_slice_nums) {
                continue;
            }
            candidate_split_grids_nums.push_back(i);
        }

        std::vector<clip_image_size> candidate_grids;
        for (int split_grids_nums : candidate_split_grids_nums) {
            int m = 1;
            while (m <= split_grids_nums) {
                if (split_grids_nums % m == 0) {
                    candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
                }
                ++m;
            }
        }

        clip_image_size best_grid{1, 1};
        float min_error = std::numeric_limits<float>::infinity();
        for (const auto & grid : candidate_grids) {
            float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
            if (error < min_error) {
                best_grid = grid;
                min_error = error;
            }
        }
        return best_grid;
    }
};
// for llava-1.5, returns the normalized float tensor of a single image; for llava-1.6
// ("spatial_unpad" with "anyres" processing), returns the normalized image patch tensors as a vector
// res_imgs memory is allocated here; previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
    clip_image_size original_size{img->nx, img->ny};
    bool pad_to_square = true;
    auto & params = ctx->vision_model.hparams;
    // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing
    if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) {
        pad_to_square = false;
    }

    if (clip_is_minicpmv(ctx)) {
        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

        for (size_t i = 0; i < imgs.size(); ++i) {
            // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
            res_imgs->entries.push_back(std::move(res));
        }

        res_imgs->grid_x = inst.grid_size.width;
        res_imgs->grid_y = inst.grid_size.height;
        return true;

    } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
        clip_image_u8 resized;
        auto patch_size = params.patch_size * 2;
        auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, patch_size, params.image_size);
        image_manipulation::bicubic_resize(*img, resized, new_size.width, new_size.height);

        clip_image_f32_ptr img_f32(clip_image_f32_init());
        // clip_image_f32_ptr res(clip_image_f32_init());
        normalize_image_u8_to_f32(resized, *img_f32, ctx->image_mean, ctx->image_std);
        // res_imgs->data[0] = *res;
        res_imgs->entries.push_back(std::move(img_f32));
        return true;

    } else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE
            || ctx->proj_type == PROJECTOR_TYPE_GEMMA3
            || ctx->proj_type == PROJECTOR_TYPE_IDEFICS3
            || ctx->proj_type == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution
            ) {
        clip_image_u8 resized_image;
        int sz = params.image_size;
        image_manipulation::resize_and_pad_image(*img, resized_image, {sz, sz});
        clip_image_f32_ptr img_f32(clip_image_f32_init());
        // clip_image_save_to_bmp(resized_image, "resized.bmp");
        normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
        res_imgs->entries.push_back(std::move(img_f32));
        return true;

    } else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) {
        clip_image_u8 resized_image;
        auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, params.patch_size, params.image_size);
        image_manipulation::bilinear_resize(*img, resized_image, new_size.width, new_size.height);
        clip_image_f32_ptr img_f32(clip_image_f32_init());
        normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
        res_imgs->entries.push_back(std::move(img_f32));
        return true;

    } else if (ctx->proj_type == PROJECTOR_TYPE_LLAMA4) {
        GGML_ASSERT(!params.image_grid_pinpoints.empty());
        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

        for (size_t i = 0; i < imgs.size(); ++i) {
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
            res_imgs->entries.push_back(std::move(res));
        }

        res_imgs->grid_x = inst.grid_size.width;
        res_imgs->grid_y = inst.grid_size.height;
        return true;
    }

    // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
    // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156

    clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily

    if (pad_to_square) {
        // for llava-1.5, we resize image to a square, and pad the shorter side with a background color
        // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
        const int longer_side = std::max(img->nx, img->ny);
        temp->nx = longer_side;
        temp->ny = longer_side;
        temp->buf.resize(3 * longer_side * longer_side);

        // background color in RGB from LLaVA (this is the mean rgb color * 255)
        const std::array<uint8_t, 3> pad_color = {122, 116, 104};

        // resize the image to the target_size
        image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);

        clip_image_f32_ptr res(clip_image_f32_init());
        normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
        res_imgs->entries.push_back(std::move(res));
        return true;

    } else if (!params.image_grid_pinpoints.empty()) {
        // "spatial_unpad" with "anyres" processing for llava-1.6
        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

        for (size_t i = 0; i < imgs.size(); ++i) {
            // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
            res_imgs->entries.push_back(std::move(res));
        }

        return true;
    }

    GGML_ASSERT(false && "Unknown image preprocessing type");
}
ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
    return ctx->vision_model.image_newline;
}

void clip_free(clip_ctx * ctx) {
    if (ctx == nullptr) {
        return;
    }
    delete ctx;
}

// deprecated
size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
    const int32_t nx = ctx->vision_model.hparams.image_size;
    const int32_t ny = ctx->vision_model.hparams.image_size;
    return clip_embd_nbytes_by_img(ctx, nx, ny);
}

size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h) {
    clip_image_f32 img;
    img.nx = img_w;
    img.ny = img_h;
    return clip_n_output_tokens(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
}

int32_t clip_get_image_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.image_size;
}

int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.patch_size;
}

int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.n_embd;
}

const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
}

const int32_t * clip_image_grid(const struct clip_ctx * ctx) {
    if (ctx->vision_model.hparams.image_grid_pinpoints.size()) {
        return &ctx->vision_model.hparams.image_grid_pinpoints.front();
    }
    return nullptr;
}

size_t get_clip_image_grid_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.image_grid_pinpoints.size();
}

int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->vision_model.hparams;
    const int n_total = clip_n_output_tokens(ctx, img);
    if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
        return img->nx / (params.patch_size * 2) + (int)(img->nx % params.patch_size > 0);
    }
    return n_total;
}

int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->vision_model.hparams;
    if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
        return img->ny / (params.patch_size * 2) + (int)(img->ny % params.patch_size > 0);
    }
    return 1;
}
int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->vision_model.hparams;

    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
    int scale_factor = ctx->vision_model.hparams.proj_scale_factor;

    if (ctx->proj_type == PROJECTOR_TYPE_LDP
            || ctx->proj_type == PROJECTOR_TYPE_LDPV2
            || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
        n_patches /= 4;
        if (ctx->vision_model.mm_glm_tok_boi) {
            n_patches += 2; // for BOI and EOI token embeddings
        }
    } else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
        if (ctx->minicpmv_version == 2) {
            n_patches = 96;
        } else if (ctx->minicpmv_version == 3) {
            n_patches = 64;
        } else if (ctx->minicpmv_version == 4) {
            n_patches = 64;
        } else {
            GGML_ABORT("Unknown minicpmv version");
        }
    } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
        int patch_size = params.patch_size * 2;
        int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
        int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
        n_patches = x_patch * y_patch;
    } else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
        int n_per_side = params.image_size / params.patch_size;
        int n_per_side_2d_pool = n_per_side / params.proj_scale_factor;
        n_patches = n_per_side_2d_pool * n_per_side_2d_pool;
    } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3 || ctx->proj_type == PROJECTOR_TYPE_INTERNVL) {
        // both W and H are divided by proj_scale_factor
        n_patches /= (params.proj_scale_factor * params.proj_scale_factor);
    } else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) {
        int n_merge = params.spatial_merge_size;
        int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1);
        int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1);
        n_patches = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
    } else if (ctx->proj_type == PROJECTOR_TYPE_LLAMA4) {
        n_patches /= (scale_factor * scale_factor);
    } else if (ctx->proj_type == PROJECTOR_TYPE_ULTRAVOX) {
        const int proj_stack_factor = ctx->vision_model.hparams.proj_stack_factor;
        const int n_len = CLIP_ALIGN(img->nx, proj_stack_factor);
        n_patches = n_len / proj_stack_factor / 2;
    }

    return n_patches;
}
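
// Worked example (illustrative hparams, not read from any particular gguf): for a
// PROJECTOR_TYPE_GEMMA3 model with image_size = 896, patch_size = 14 and
// proj_scale_factor = 4, n_per_side = 896 / 14 = 64, the 2D pooling reduces this to
// 64 / 4 = 16 per side, and the encoder therefore emits 16 * 16 = 256 tokens.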
static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
    assert(embed_dim % 2 == 0);
    int H = pos.size();
    int W = pos[0].size();

    std::vector<float> omega(embed_dim / 2);
    for (int i = 0; i < embed_dim / 2; ++i) {
        omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
    }

    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                float out_value = pos[h][w] * omega[d];
                emb[h][w][d]                 = sin(out_value);
                emb[h][w][d + embed_dim / 2] = cos(out_value);
            }
        }
    }

    return emb;
}

static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
    assert(embed_dim % 2 == 0);
    std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
    std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)

    int H = emb_h.size();
    int W = emb_h[0].size();
    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));

    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                emb[h][w][d]                 = emb_h[h][w][d];
                emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
            }
        }
    }
    return emb;
}

static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
    int grid_h_size = image_size.first;
    int grid_w_size = image_size.second;

    std::vector<float> grid_h(grid_h_size);
    std::vector<float> grid_w(grid_w_size);

    for (int i = 0; i < grid_h_size; ++i) {
        grid_h[i] = static_cast<float>(i);
    }
    for (int i = 0; i < grid_w_size; ++i) {
        grid_w[i] = static_cast<float>(i);
    }

    std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid[h][w] = grid_w[w];
        }
    }
    std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid_2d[0][h][w] = grid_h[h];
            grid_2d[1][h][w] = grid_w[w];
        }
    }

    std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);

    int H = image_size.first;
    int W = image_size.second;
    std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
        }
    }

    return pos_embed_2d;
}
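
// In math form, the helpers above compute the standard non-interleaved sincos embedding:
// with D = embed_dim and omega_i = 1 / 10000^(i / (D/2)), each position p yields
// emb[p][i] = sin(p * omega_i) and emb[p][i + D/2] = cos(p * omega_i); the 2D variant
// concatenates a D/2-dim embedding of the row index with one of the column index.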
bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    clip_image_f32_batch imgs;
    clip_image_f32_ptr img_copy(clip_image_f32_init());
    *img_copy = *img;
    imgs.entries.push_back(std::move(img_copy));

    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}
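
// Example (sketch, not part of the library): a minimal end-to-end encode of a single
// image file using only APIs defined in this file; the file name and thread count are
// placeholders and error handling is abbreviated:
//
//     clip_image_u8 * img_u8 = clip_image_u8_init();
//     if (!clip_image_load_from_file("input.jpg", img_u8)) { /* handle error */ }
//
//     clip_image_f32_batch * batch = clip_image_f32_batch_init();
//     clip_image_preprocess(ctx, img_u8, batch);
//
//     clip_image_f32 * img_f32 = clip_image_f32_get_img(batch, 0);
//     std::vector<float> emb(clip_embd_nbytes_by_img(ctx, img_f32->nx, img_f32->ny) / sizeof(float));
//     clip_image_encode(ctx, /*n_threads=*/4, img_f32, emb.data());
//
//     clip_image_f32_batch_free(batch);
//     clip_image_u8_free(img_u8);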
bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
    const clip_image_f32_batch & imgs = *imgs_c_ptr;
    int batch_size = imgs.entries.size();

    // TODO @ngxson : implement batch size > 1 as a loop
    // we don't need true batching support because the cgraph would become too big anyway
    if (batch_size != 1) {
        return false; // only support batch size of 1
    }
    // build the inference graph
    ctx->debug_print_tensors.clear();
    ggml_backend_sched_reset(ctx->sched.get());
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
    ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);

    // set inputs
    const auto & model   = ctx->vision_model;
    const auto & hparams = model.hparams;

    const int image_size_width  = imgs.entries[0]->nx;
    const int image_size_height = imgs.entries[0]->ny;

    const int patch_size  = hparams.patch_size;
    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int n_pos = num_patches + (model.class_embedding ? 1 : 0);
    const int pos_w = image_size_width  / patch_size;
    const int pos_h = image_size_height / patch_size;

    const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl

    auto get_inp_tensor = [&gf](const char * name) {
        ggml_tensor * inp = ggml_graph_get_tensor(gf, name);
        if (inp == nullptr) {
            GGML_ABORT("Failed to get tensor %s", name);
        }
        if (!(inp->flags & GGML_TENSOR_FLAG_INPUT)) {
            GGML_ABORT("Tensor %s is not an input tensor", name);
        }
        return inp;
    };

    auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_F32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_I32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    // set input pixel values
    if (!imgs.is_audio) {
        size_t nelem = 0;
        for (const auto & img : imgs.entries) {
            nelem += img->nx * img->ny * 3;
        }
        std::vector<float> inp_raw(nelem);

        // layout of data (note: the channel dim is unrolled to better visualize the layout):
        //
        // ┌──W──┐
        // │  H  │  channel = R
        // ├─────┤    │
        // │  H  │  channel = G
        // ├─────┤    │
        // │  H  │  channel = B
        // └─────┘    │
        //   ─────────┘ x B

        for (size_t i = 0; i < imgs.entries.size(); i++) {
            const int nx = imgs.entries[i]->nx;
            const int ny = imgs.entries[i]->ny;
            const int n  = nx * ny;

            for (int b = 0; b < batch_size; b++) {
                float * batch_entry = inp_raw.data() + b * (3 * n);
                for (int y = 0; y < ny; y++) {
                    for (int x = 0; x < nx; x++) {
                        size_t base_src = 3 * (y * nx + x); // idx of the first channel
                        size_t base_dst =      y * nx + x;  // idx of the first channel
                        batch_entry[        base_dst] = imgs.entries[b]->buf[base_src    ];
                        batch_entry[1 * n + base_dst] = imgs.entries[b]->buf[base_src + 1];
                        batch_entry[2 * n + base_dst] = imgs.entries[b]->buf[base_src + 2];
                    }
                }
            }
        }
        set_input_f32("inp_raw", inp_raw);

    } else {
        // audio input
        GGML_ASSERT(imgs.entries.size() == 1);
        const auto & mel_inp = imgs.entries[0];
        const int n_step = mel_inp->nx;
        const int n_mel  = mel_inp->ny;
        std::vector<float> inp_raw(n_step * n_mel);
        std::memcpy(inp_raw.data(), mel_inp->buf.data(), n_step * n_mel * sizeof(float));
        set_input_f32("inp_raw", inp_raw);
    }
    // set input per projector
    switch (ctx->proj_type) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                // inspired from siglip:
                //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
                //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
                std::vector<int32_t> positions(pos_h * pos_w);
                int bucket_coords_h[1024];
                int bucket_coords_w[1024];
                for (int i = 0; i < pos_h; i++){
                    bucket_coords_h[i] = std::floor(70.0 * i / pos_h);
                }
                for (int i = 0; i < pos_w; i++){
                    bucket_coords_w[i] = std::floor(70.0 * i / pos_w);
                }
                for (int i = 0, id = 0; i < pos_h; i++){
                    for (int j = 0; j < pos_w; j++){
                        positions[id++] = bucket_coords_h[i] * 70 + bucket_coords_w[j];
                    }
                }
                set_input_i32("positions", positions);

                // inspired from resampler of Qwen-VL:
                //    -> https://huggingface.co/Qwen/Qwen-VL/tree/main
                //    -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
                int embed_dim = clip_n_mmproj_embd(ctx);

                // TODO @ngxson : this is very inefficient, can we do this using ggml_sin and ggml_cos?
                auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));

                std::vector<float> pos_embed(embed_dim * pos_w * pos_h);
                for (int i = 0; i < pos_w * pos_h; ++i) {
                    for (int j = 0; j < embed_dim; ++j) {
                        pos_embed[i * embed_dim + j] = pos_embed_t[i][j];
                    }
                }

                set_input_f32("pos_embed", pos_embed);
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
            {
                const int merge_ratio = 2;
                const int pw = image_size_width  / patch_size;
                const int ph = image_size_height / patch_size;
                std::vector<int> positions(n_pos * 4);
                int ptr = 0;
                for (int y = 0; y < ph; y += merge_ratio) {
                    for (int x = 0; x < pw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                positions[                  ptr] = y + dy;
                                positions[    num_patches + ptr] = x + dx;
                                positions[2 * num_patches + ptr] = y + dy;
                                positions[3 * num_patches + ptr] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_QWEN25VL:
            {
                // pw * ph = number of tokens output by ViT after applying patch merger
                // ipw * iph = number of vision tokens processed inside ViT
                const int merge_ratio = 2;
                const int pw  = image_size_width  / patch_size / merge_ratio;
                const int ph  = image_size_height / patch_size / merge_ratio;
                const int ipw = image_size_width  / patch_size;
                const int iph = image_size_height / patch_size;

                std::vector<int> idx    (ph * pw);
                std::vector<int> inv_idx(ph * pw);

                if (use_window_attn) {
                    const int attn_window_size = 112;
                    const int grid_window = attn_window_size / patch_size / merge_ratio;
                    int dst = 0;
                    // [num_vision_tokens, num_vision_tokens] attention mask tensor
                    std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
                    int mask_row = 0;

                    for (int y = 0; y < ph; y += grid_window) {
                        for (int x = 0; x < pw; x += grid_window) {
                            const int win_h = std::min(grid_window, ph - y);
                            const int win_w = std::min(grid_window, pw - x);
                            const int dst_0 = dst;
                            // group all tokens belonging to the same window together (into a contiguous range)
                            for (int dy = 0; dy < win_h; dy++) {
                                for (int dx = 0; dx < win_w; dx++) {
                                    const int src = (y + dy) * pw + (x + dx);
                                    GGML_ASSERT(src < (int)idx.size());
                                    GGML_ASSERT(dst < (int)inv_idx.size());
                                    idx    [src] = dst;
                                    inv_idx[dst] = src;
                                    dst++;
                                }
                            }

                            for (int r = 0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
                                int row_offset = mask_row * (ipw * iph);
                                std::fill(
                                    mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
                                    mask.begin() + row_offset + (dst   * merge_ratio * merge_ratio),
                                    0.0);
                                mask_row++;
                            }
                        }
                    }

                    set_input_i32("window_idx",     idx);
                    set_input_i32("inv_window_idx", inv_idx);
                    set_input_f32("window_mask",    mask);
                } else {
                    for (int i = 0; i < ph * pw; i++) {
                        idx[i] = i;
                    }
                }

                const int mpow = merge_ratio * merge_ratio;
                std::vector<int> positions(n_pos * 4);

                int ptr = 0;
                for (int y = 0; y < iph; y += merge_ratio) {
                    for (int x = 0; x < ipw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                auto remap = idx[ptr / mpow];
                                remap = (remap * mpow) + (ptr % mpow);

                                positions[                  remap] = y + dy;
                                positions[    num_patches + remap] = x + dx;
                                positions[2 * num_patches + remap] = y + dy;
                                positions[3 * num_patches + remap] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }

                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i / n_patches_per_col;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i % n_patches_per_col;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);

                // The patches vector is used to get rows to index into the embeds with;
                // we should skip dim 0 only if we have CLS to avoid going out of bounds
                // when retrieving the rows.
                int patch_offset = model.class_embedding ? 1 : 0;
                std::vector<int32_t> patches(num_patches);
                for (int i = 0; i < num_patches; i++) {
                    patches[i] = i + patch_offset;
                }
                set_input_i32("patches", patches);
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_ULTRAVOX:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(num_patches + 1, 0); // +1 for the [CLS] token
                // the last pos is always kept at 0, it's for CLS
                // dimension H
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i / n_patches_per_col) + 1;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i % n_patches_per_col) + 1;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        default:
            GGML_ABORT("Unknown projector type");
    }
  3319. // ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
    ggml_backend_dev_t dev = ggml_backend_get_device(ctx->backend_cpu);
    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
    if (reg) {
        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (ggml_backend_set_n_threads_fn) {
            ggml_backend_set_n_threads_fn(ctx->backend_cpu, n_threads);
        }
    }

    auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
        return false;
    }
    // print debug nodes
    if (ctx->debug_graph) {
        LOG_INF("\n\n---\n\n");
        LOG_INF("\n\nDebug graph:\n\n");
        for (ggml_tensor * t : ctx->debug_print_tensors) {
            std::vector<uint8_t> data(ggml_nbytes(t));
            ggml_backend_tensor_get(t, data.data(), 0, ggml_nbytes(t));
            print_tensor_shape(t);
            print_tensor_data(t, data.data(), 3);
        }
    }
    // the last node is the embedding tensor
    ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // sanity check (only a batch size of 1 is supported for now)
    const int n_tokens_out = embeddings->ne[1];
    const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
    if (n_tokens_out != expected_n_tokens_out) {
        LOG_ERR("%s: expected %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
        GGML_ABORT("Invalid number of output tokens");
    }

    // copy the embeddings to the buffer passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}
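
// Usage sketch (illustrative, not part of the original file): callers size the
// output buffer from clip_n_output_tokens() and clip_n_mmproj_embd() before
// invoking the batch-encode entry point (assumed here to be
// clip_image_batch_encode from clip.h):
//
//   const int n_tokens = clip_n_output_tokens(ctx, img);
//   const int n_embd   = clip_n_mmproj_embd(ctx);
//   std::vector<float> embd((size_t) n_tokens * n_embd);
//   clip_image_batch_encode(ctx, n_threads, &batch, embd.data());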

int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    switch (ctx->proj_type) {
        case PROJECTOR_TYPE_LDP:
            return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
        case PROJECTOR_TYPE_LDPV2:
            return ctx->vision_model.mm_model_peg_0_b->ne[0];
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_PIXTRAL:
            return ctx->vision_model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_MLP_NORM:
            return ctx->vision_model.mm_3_b->ne[0];
        case PROJECTOR_TYPE_MINICPMV:
            if (ctx->minicpmv_version == 2) {
                return 4096;
            } else if (ctx->minicpmv_version == 3) {
                return 3584;
            } else if (ctx->minicpmv_version == 4) {
                return 3584;
            }
            GGML_ABORT("Unknown minicpmv version");
        case PROJECTOR_TYPE_GLM_EDGE:
            return ctx->vision_model.mm_model_mlp_3_w->ne[1];
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            return ctx->vision_model.mm_1_b->ne[0];
        case PROJECTOR_TYPE_GEMMA3:
            return ctx->vision_model.mm_input_proj_w->ne[0];
        case PROJECTOR_TYPE_IDEFICS3:
            return ctx->vision_model.projection->ne[1];
        case PROJECTOR_TYPE_ULTRAVOX:
            return ctx->vision_model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_INTERNVL:
            return ctx->vision_model.mm_3_w->ne[1];
        case PROJECTOR_TYPE_LLAMA4:
            return ctx->vision_model.mm_model_proj->ne[1];
        default:
            GGML_ABORT("Unknown projector type");
    }
}

int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
        return ctx->minicpmv_version;
    }
    return 0;
}

bool clip_is_glm(const struct clip_ctx * ctx) {
    return ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE;
}

bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
    return ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL;
}

bool clip_is_llava(const struct clip_ctx * ctx) {
    return ctx->has_llava_projector;
}

bool clip_is_gemma3(const struct clip_ctx * ctx) {
    return ctx->proj_type == PROJECTOR_TYPE_GEMMA3;
}

bool clip_has_vision_encoder(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.has_vision;
}

bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.has_audio;
}

bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
    clip_image_f32 clip_img;
    clip_img.buf.resize(h * w * 3);
    for (int i = 0; i < h * w * 3; i++) {
        clip_img.buf[i] = img[i];
    }
    clip_img.nx = w;
    clip_img.ny = h;
    // propagate the encode result instead of unconditionally returning true
    return clip_image_encode(ctx, n_threads, &clip_img, vec);
}
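
// Usage sketch (illustrative): img must hold h * w * 3 floats in the same
// packed layout as clip_image_f32::buf, and vec must be large enough for
// clip_n_output_tokens(...) * clip_n_mmproj_embd(...) floats, e.g.:
//
//   std::vector<float> rgb((size_t) h * w * 3, 0.5f); // hypothetical input
//   std::vector<float> out((size_t) n_tokens * clip_n_mmproj_embd(ctx));
//   clip_encode_float_image(ctx, /*n_threads=*/4, rgb.data(), h, w, out.data());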

//
// API used internally with mtmd
//

projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
    return ctx->proj_type;
}

void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
    clip_image_f32 * audio = new clip_image_f32;
    audio->nx = n_frames;
    audio->ny = n_mel;
    audio->buf.resize(n_frames * n_mel);
    std::memcpy(audio->buf.data(), mel, n_frames * n_mel * sizeof(float));

    batch->entries.push_back(clip_image_f32_ptr(audio));
    batch->is_audio = true;
}
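
// Usage sketch (illustrative): the mel buffer is read as n_frames * n_mel
// floats and the batch takes ownership of the new entry, e.g.:
//
//   clip_image_f32_batch batch;
//   std::vector<float> mel((size_t) n_frames * 80); // 80 mel bins (assumed)
//   clip_image_f32_batch_add_mel(&batch, /*n_mel=*/80, n_frames, mel.data());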