// NOTE: This is modified from clip.cpp only for LLaVA,
// so there might still be unnecessary artifacts hanging around
// I'll gradually clean and extend it
// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch
#include "clip.h"
#include "clip-impl.h"
#include "ggml.h"
#include "ggml-cpp.h"
#include "ggml-cpu.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "gguf.h"

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <regex>
#include <stdexcept>
#include <unordered_set>
#include <vector>
#include <sstream>
#include <cinttypes>
#include <limits>
#include <array>
#include <numeric>
#include <functional>

struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};

enum ffn_op_type {
    FFN_GELU,
    FFN_GELU_ERF,
    FFN_SILU,
    FFN_GELU_QUICK,
};

enum norm_type {
    NORM_TYPE_NORMAL,
    NORM_TYPE_RMS,
};

//#define CLIP_DEBUG_FUNCTIONS
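// (uncomment the define above to compile the PPM/BMP image-dump helpers below)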
#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    // Write pixel data
    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
    }

    file.close();
}

static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;
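    // BMP rows are padded to a multiple of 4 bytes; e.g. a 3-pixel-wide image has
    // 3 * 3 = 9 bytes of pixel data per row, 3 bytes of padding, so stride = 12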
    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',    // Signature
        0,0,0,0,    // Image file size in bytes
        0,0,0,0,    // Reserved
        54,0,0,0    // Start of pixel array
    };

    // Total file size
    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0,   // Size of this header (40 bytes)
        0,0,0,0,    // Image width
        0,0,0,0,    // Image height
        1,0,        // Number of color planes
        24,0,       // Bits per pixel
        0,0,0,0,    // No compression
        0,0,0,0,    // Image size (can be 0 for no compression)
        0,0,0,0,    // X pixels per meter (not specified)
        0,0,0,0,    // Y pixels per meter (not specified)
        0,0,0,0,    // Total colors (color table not used)
        0,0,0,0     // Important colors (all are important)
    };

    // Width and height in the information header
    infoHeader[4]  = (unsigned char)(img.nx);
    infoHeader[5]  = (unsigned char)(img.nx >> 8);
    infoHeader[6]  = (unsigned char)(img.nx >> 16);
    infoHeader[7]  = (unsigned char)(img.nx >> 24);
    infoHeader[8]  = (unsigned char)(img.ny);
    infoHeader[9]  = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));

    // Pixel data
    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            // Each pixel
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
                img.buf[pixelIndex]
            };
            file.write(reinterpret_cast<char*>(pixel), 3);
        }
        // Write padding for the row
        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
    }

    file.close();
}

// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(3 * src.nx * src.ny);
    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
    }
}
#endif
//
// clip layers
//

enum patch_merge_type {
    PATCH_MERGE_FLAT,
    PATCH_MERGE_SPATIAL_UNPAD,
};

struct clip_hparams {
    int32_t image_size;
    int32_t patch_size;
    int32_t n_embd;
    int32_t n_ff;
    int32_t projection_dim;
    int32_t n_head;
    int32_t n_layer;
    // idefics3
    int32_t preproc_image_size = 0; // aka max_dimension
    int32_t proj_scale_factor = 0;

    float image_mean[3];
    float image_std[3];

    // for models using dynamic image size, we need to have a smaller image size to warmup
    // otherwise, the user will get OOM every time they load the model
    int32_t warmup_image_size = 0;
    int32_t warmup_audio_size = 3000;

    ffn_op_type ffn_op = FFN_GELU;

    patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;

    float eps        = 1e-6;
    float rope_theta = 0.0;

    std::vector<clip_image_size> image_res_candidates; // for llava-uhd style models
    int32_t image_crop_resolution;
    std::unordered_set<int32_t> vision_feature_layer;
    int32_t attn_window_size   = 0;
    int32_t n_wa_pattern       = 0;
    int32_t spatial_merge_size = 0;

    // audio
    int32_t n_mel_bins = 0;        // whisper preprocessor
    int32_t proj_stack_factor = 0; // ultravox

    // legacy
    bool has_llava_projector = false;
    int minicpmv_version = 0;
    int32_t minicpmv_query_num = 0; // MiniCPM-V query number
};
struct clip_layer {
    // attention
    ggml_tensor * k_w = nullptr;
    ggml_tensor * k_b = nullptr;
    ggml_tensor * q_w = nullptr;
    ggml_tensor * q_b = nullptr;
    ggml_tensor * v_w = nullptr;
    ggml_tensor * v_b = nullptr;

    ggml_tensor * qkv_w = nullptr;
    ggml_tensor * qkv_b = nullptr;

    ggml_tensor * o_w = nullptr;
    ggml_tensor * o_b = nullptr;

    ggml_tensor * k_norm = nullptr;
    ggml_tensor * q_norm = nullptr;

    // layernorm 1
    ggml_tensor * ln_1_w = nullptr;
    ggml_tensor * ln_1_b = nullptr;

    ggml_tensor * ff_up_w   = nullptr;
    ggml_tensor * ff_up_b   = nullptr;
    ggml_tensor * ff_gate_w = nullptr;
    ggml_tensor * ff_gate_b = nullptr;
    ggml_tensor * ff_down_w = nullptr;
    ggml_tensor * ff_down_b = nullptr;

    // layernorm 2
    ggml_tensor * ln_2_w = nullptr;
    ggml_tensor * ln_2_b = nullptr;

    // layer scale (no bias)
    ggml_tensor * ls_1_w = nullptr;
    ggml_tensor * ls_2_w = nullptr;
};

struct clip_model {
    clip_modality modality = CLIP_MODALITY_VISION;
    projector_type proj_type = PROJECTOR_TYPE_MLP;
    clip_hparams hparams;

    // embeddings
    ggml_tensor * class_embedding = nullptr;
    ggml_tensor * patch_embeddings_0 = nullptr;
    ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along temporal dimension (Qwen2VL)
    ggml_tensor * patch_bias = nullptr;
    ggml_tensor * position_embeddings = nullptr;

    ggml_tensor * pre_ln_w = nullptr;
    ggml_tensor * pre_ln_b = nullptr;

    std::vector<clip_layer> layers;

    ggml_tensor * post_ln_w;
    ggml_tensor * post_ln_b;

    ggml_tensor * projection; // TODO: rename it to fc (fully connected layer)
    ggml_tensor * mm_fc_w;
    ggml_tensor * mm_fc_b;

    // LLaVA projection
    ggml_tensor * mm_input_norm_w = nullptr;
    ggml_tensor * mm_input_norm_b = nullptr;
    ggml_tensor * mm_0_w = nullptr;
    ggml_tensor * mm_0_b = nullptr;
    ggml_tensor * mm_2_w = nullptr;
    ggml_tensor * mm_2_b = nullptr;

    ggml_tensor * image_newline = nullptr;

    // Yi type models with mlp+normalization projection
    ggml_tensor * mm_1_w = nullptr; // Yi type models have 0, 1, 3, 4
    ggml_tensor * mm_1_b = nullptr;
    ggml_tensor * mm_3_w = nullptr;
    ggml_tensor * mm_3_b = nullptr;
    ggml_tensor * mm_4_w = nullptr;
    ggml_tensor * mm_4_b = nullptr;

    // GLMV-Edge projection
    ggml_tensor * mm_model_adapter_conv_w = nullptr;
    ggml_tensor * mm_model_adapter_conv_b = nullptr;

    // MobileVLM projection
    ggml_tensor * mm_model_mlp_1_w = nullptr;
    ggml_tensor * mm_model_mlp_1_b = nullptr;
    ggml_tensor * mm_model_mlp_3_w = nullptr;
    ggml_tensor * mm_model_mlp_3_b = nullptr;
    ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;

    // MobileVLM_V2 projection
    ggml_tensor * mm_model_mlp_0_w = nullptr;
    ggml_tensor * mm_model_mlp_0_b = nullptr;
    ggml_tensor * mm_model_mlp_2_w = nullptr;
    ggml_tensor * mm_model_mlp_2_b = nullptr;
    ggml_tensor * mm_model_peg_0_w = nullptr;
    ggml_tensor * mm_model_peg_0_b = nullptr;

    // MINICPMV projection
    ggml_tensor * mm_model_pos_embed_k = nullptr;
    ggml_tensor * mm_model_query = nullptr;
    ggml_tensor * mm_model_proj = nullptr;
    ggml_tensor * mm_model_kv_proj = nullptr;
    ggml_tensor * mm_model_attn_q_w = nullptr;
    ggml_tensor * mm_model_attn_q_b = nullptr;
    ggml_tensor * mm_model_attn_k_w = nullptr;
    ggml_tensor * mm_model_attn_k_b = nullptr;
    ggml_tensor * mm_model_attn_v_w = nullptr;
    ggml_tensor * mm_model_attn_v_b = nullptr;
    ggml_tensor * mm_model_attn_o_w = nullptr;
    ggml_tensor * mm_model_attn_o_b = nullptr;
    ggml_tensor * mm_model_ln_q_w = nullptr;
    ggml_tensor * mm_model_ln_q_b = nullptr;
    ggml_tensor * mm_model_ln_kv_w = nullptr;
    ggml_tensor * mm_model_ln_kv_b = nullptr;
    ggml_tensor * mm_model_ln_post_w = nullptr;
    ggml_tensor * mm_model_ln_post_b = nullptr;

    // gemma3
    ggml_tensor * mm_input_proj_w = nullptr;
    ggml_tensor * mm_soft_emb_norm_w = nullptr;

    // pixtral
    ggml_tensor * token_embd_img_break = nullptr;
    ggml_tensor * mm_patch_merger_w = nullptr;

    // ultravox / whisper encoder
    ggml_tensor * conv1d_1_w = nullptr;
    ggml_tensor * conv1d_1_b = nullptr;
    ggml_tensor * conv1d_2_w = nullptr;
    ggml_tensor * conv1d_2_b = nullptr;
    ggml_tensor * mm_norm_pre_w = nullptr;
    ggml_tensor * mm_norm_mid_w = nullptr;

    // cogvlm
    ggml_tensor * mm_post_fc_norm_w = nullptr;
    ggml_tensor * mm_post_fc_norm_b = nullptr;
    ggml_tensor * mm_h_to_4h_w = nullptr;
    ggml_tensor * mm_gate_w = nullptr;
    ggml_tensor * mm_4h_to_h_w = nullptr;
    ggml_tensor * mm_boi = nullptr;
    ggml_tensor * mm_eoi = nullptr;

    bool audio_has_avgpool() const {
        return proj_type == PROJECTOR_TYPE_QWEN2A
            || proj_type == PROJECTOR_TYPE_VOXTRAL;
    }

    bool audio_has_stack_frames() const {
        return proj_type == PROJECTOR_TYPE_ULTRAVOX
            || proj_type == PROJECTOR_TYPE_VOXTRAL;
    }
};
struct clip_ctx {
    clip_model model;

    gguf_context_ptr ctx_gguf;
    ggml_context_ptr ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    std::vector<ggml_backend_t> backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    ggml_backend_t backend     = nullptr;
    ggml_backend_t backend_cpu = nullptr;
    ggml_backend_buffer_ptr buf;

    int max_nodes = 8192;
    ggml_backend_sched_ptr sched;

    // for debugging
    bool debug_graph = false;
    std::vector<ggml_tensor *> debug_print_tensors;

    clip_ctx(clip_context_params & ctx_params) {
        debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr;
        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (!backend_cpu) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        if (ctx_params.use_gpu) {
            auto backend_name = std::getenv("MTMD_BACKEND_DEVICE");
            if (backend_name != nullptr) {
                backend = ggml_backend_init_by_name(backend_name, nullptr);
                if (!backend) {
                    LOG_WRN("%s: Warning: Failed to initialize \"%s\" backend, falling back to default GPU backend\n", __func__, backend_name);
                }
            }
            if (!backend) {
                backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
                backend = backend ? backend : ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr);
            }
        }

        if (backend) {
            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
            backend_ptrs.push_back(backend);
            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
        } else {
            backend = backend_cpu;
            LOG_INF("%s: CLIP using CPU backend\n", __func__);
        }

        backend_ptrs.push_back(backend_cpu);
        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));

        sched.reset(
            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
        );
    }

    ~clip_ctx() {
        ggml_backend_free(backend);
        if (backend != backend_cpu) {
            ggml_backend_free(backend_cpu);
        }
    }

    // this function is added so that we don't change too much of the existing code
    projector_type proj_type() const {
        return model.proj_type;
    }
};
struct clip_graph {
    clip_ctx * ctx;
    const clip_model & model;
    const clip_hparams & hparams;

    // we only support single image per batch
    const clip_image_f32 & img;

    const int patch_size;
    const int n_patches_x;
    const int n_patches_y;
    const int n_patches;
    const int n_embd;
    const int n_head;
    const int d_head;
    const int n_layer;
    const float eps;
    const float kq_scale;
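    // kq_scale is the standard scaled-dot-product-attention factor 1/sqrt(d_head),
    // e.g. d_head = 64 gives kq_scale = 0.125 (illustrative value)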
    ggml_context_ptr ctx0_ptr;
    ggml_context * ctx0;
    ggml_cgraph * gf;

    clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
            ctx(ctx),
            model(ctx->model),
            hparams(model.hparams),
            img(img),
            patch_size(hparams.patch_size),
            n_patches_x(img.nx / patch_size),
            n_patches_y(img.ny / patch_size),
            n_patches(n_patches_x * n_patches_y),
            n_embd(hparams.n_embd),
            n_head(hparams.n_head),
            d_head(n_embd / n_head),
            n_layer(hparams.n_layer),
            eps(hparams.eps),
            kq_scale(1.0f / sqrtf((float)d_head)) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx->buf_compute_meta.size(),
            /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
            /*.no_alloc   =*/ true,
        };
        ctx0_ptr.reset(ggml_init(params));
        ctx0 = ctx0_ptr.get();
        gf = ggml_new_graph_custom(ctx0, ctx->max_nodes, false);
    }
    ggml_cgraph * build_siglip() {
        ggml_tensor * inp = build_inp();

        ggml_tensor * learned_pos_embd = model.position_embeddings;
        if (ctx->proj_type() == PROJECTOR_TYPE_LFM2) {
            learned_pos_embd = resize_position_embeddings();
        }

        ggml_tensor * cur = build_vit(
                            inp, n_patches,
                            NORM_TYPE_NORMAL,
                            hparams.ffn_op,
                            learned_pos_embd,
                            nullptr);

        if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) {
            const int batch_size = 1;
            GGML_ASSERT(n_patches_x == n_patches_y);
            const int patches_per_image = n_patches_x;
            const int kernel_size = hparams.proj_scale_factor;

            cur = ggml_transpose(ctx0, cur);
            cur = ggml_cont_4d(ctx0, cur, patches_per_image, patches_per_image, n_embd, batch_size);

            // doing a pool2d to reduce the number of output tokens
            cur = ggml_pool_2d(ctx0, cur, GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0);
            cur = ggml_reshape_3d(ctx0, cur, cur->ne[0] * cur->ne[0], n_embd, batch_size);
            cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
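            // illustrative shapes: a 64x64 patch grid pooled with kernel_size = 4
            // becomes a 16x16 grid, i.e. 4096 -> 256 output tokens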
            // apply norm before projection
            cur = ggml_rms_norm(ctx0, cur, eps);
            cur = ggml_mul(ctx0, cur, model.mm_soft_emb_norm_w);

            // apply projection
            cur = ggml_mul_mat(ctx0,
                ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)),
                cur);

        } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3) {
            // pixel_shuffle
            // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578
            const int scale_factor = model.hparams.proj_scale_factor;
            cur = build_patch_merge_permute(cur, scale_factor);
            cur = ggml_mul_mat(ctx0, model.projection, cur);

        } else if (ctx->proj_type() == PROJECTOR_TYPE_LFM2) {
            // pixel unshuffle block
            const int scale_factor = model.hparams.proj_scale_factor;
            cur = build_patch_merge_permute(cur, scale_factor);

            // projection
            cur = ggml_norm(ctx0, cur, 1e-5); // default nn.LayerNorm
            cur = ggml_mul(ctx0, cur, model.mm_input_norm_w);
            cur = ggml_add(ctx0, cur, model.mm_input_norm_b);

            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_1_b);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_2_b);

        } else {
            GGML_ABORT("SigLIP: Unsupported projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    ggml_cgraph * build_pixtral() {
        const int n_merge = hparams.spatial_merge_size;

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            return build_rope_2d(ctx0, cur, pos_h, pos_w, hparams.rope_theta, true);
        };

        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                            inp, n_patches,
                            NORM_TYPE_RMS,
                            hparams.ffn_op,
                            nullptr, // no learned pos embd
                            add_pos);

        // mistral small 3.1 patch merger
        // ref: https://github.com/huggingface/transformers/blob/7a3e208892c06a5e278144eaf38c8599a42f53e7/src/transformers/models/mistral3/modeling_mistral3.py#L67
        if (model.mm_patch_merger_w) {
            GGML_ASSERT(hparams.spatial_merge_size > 0);

            cur = ggml_mul(ctx0, ggml_rms_norm(ctx0, cur, eps), model.mm_input_norm_w);

            // reshape image tokens to 2D grid
            cur = ggml_reshape_3d(ctx0, cur, n_embd, n_patches_x, n_patches_y);
            cur = ggml_permute(ctx0, cur, 2, 0, 1, 3); // [x, y, n_embd]
            cur = ggml_cont(ctx0, cur);

            // torch.nn.functional.unfold is just an im2col under the hood
            // we just need a dummy kernel to make it work
            ggml_tensor * kernel = ggml_view_3d(ctx0, cur, n_merge, n_merge, cur->ne[2], 0, 0, 0);
            cur = ggml_im2col(ctx0, kernel, cur, n_merge, n_merge, 0, 0, 1, 1, true, inp->type);
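            // each im2col output column now stacks one n_merge x n_merge block of
            // n_embd-dim patch embeddings, i.e. rows of n_embd * n_merge^2 values,
            // one per merged patch (n_patches / n_merge^2 rows in total)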
            // project to n_embd
            cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
            cur = ggml_mul_mat(ctx0, model.mm_patch_merger_w, cur);
        }

        // LlavaMultiModalProjector (always using GELU activation)
        {
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            if (model.mm_1_b) {
                cur = ggml_add(ctx0, cur, model.mm_1_b);
            }

            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            if (model.mm_2_b) {
                cur = ggml_add(ctx0, cur, model.mm_2_b);
            }
        }

        // arrangement of the [IMG_BREAK] token
        if (model.token_embd_img_break) {
            // not efficient, but works
            // the trick is to view the embeddings as a 3D tensor with shape [n_embd, n_patches_per_row, n_rows]
            // and then concatenate the [IMG_BREAK] token to the end of each row, aka n_patches_per_row dimension
            // after the concatenation, we have a tensor with shape [n_embd, n_patches_per_row + 1, n_rows]

            const int p_y             = n_merge > 0 ? n_patches_y / n_merge : n_patches_y;
            const int p_x             = n_merge > 0 ? n_patches_x / n_merge : n_patches_x;
            const int p_total         = p_x * p_y;
            const int n_embd_text     = cur->ne[0];
            const int n_tokens_output = p_total + p_y - 1; // one [IMG_BREAK] per row, except the last row
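            // e.g. p_x = 3, p_y = 2 (illustrative numbers): 6 patch tokens plus a break
            // after the first row only -> 6 + 2 - 1 = 7 output tokens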
            ggml_tensor * tmp = ggml_reshape_3d(ctx0, cur, n_embd_text, p_x, p_y);
            ggml_tensor * tok = ggml_new_tensor_3d(ctx0, tmp->type, n_embd_text, 1, p_y);
            tok = ggml_scale(ctx0, tok, 0.0); // clear the tensor
            tok = ggml_add(ctx0, tok, model.token_embd_img_break);
            tmp = ggml_concat(ctx0, tmp, tok, 1);
            cur = ggml_view_2d(ctx0, tmp,
                n_embd_text, n_tokens_output,
                ggml_row_size(tmp->type, n_embd_text), 0);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    // Qwen2VL and Qwen2.5VL use M-RoPE
    ggml_cgraph * build_qwen2vl() {
        GGML_ASSERT(model.patch_bias == nullptr);
        GGML_ASSERT(model.class_embedding == nullptr);

        const int batch_size       = 1;
        const bool use_window_attn = hparams.n_wa_pattern > 0;
        const int n_wa_pattern     = hparams.n_wa_pattern;
        const int n_pos            = n_patches;
        const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position

        norm_type norm_t = ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL
            ? NORM_TYPE_RMS     // qwen 2.5 vl
            : NORM_TYPE_NORMAL; // qwen 2 vl

        int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
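        // the head dimension is split into 4 equal RoPE sections, one per position
        // stream; the "positions" tensor below therefore carries 4 values per token
        // (see num_position_ids = n_pos * 4 above)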
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

        GGML_ASSERT(img.nx % (patch_size * 2) == 0);
        GGML_ASSERT(img.ny % (patch_size * 2) == 0);

        // second conv dimension
        {
            auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
            inp = ggml_add(ctx0, inp, inp_1);

            inp = ggml_permute(ctx0, inp, 1, 2, 0, 3); // [w, h, c, b] -> [c, w, h, b]
            inp = ggml_cont_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
            inp = ggml_permute(ctx0, inp, 0, 2, 1, 3);
            inp = ggml_cont_3d(
                ctx0, inp,
                n_embd, n_patches_x * n_patches_y, batch_size);
        }
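        // the permute/reshape dance above reorders the row-major patch sequence so
        // that each 2x2 block of neighboring patches becomes contiguous; the merger
        // MLP at the end of this graph then consumes them as n_embd * 4 rows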
        ggml_tensor * inpL           = inp;
        ggml_tensor * window_mask    = nullptr;
        ggml_tensor * window_idx     = nullptr;
        ggml_tensor * inv_window_idx = nullptr;

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
        }

        if (use_window_attn) {
            // handle window attention inputs
            inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(inv_window_idx, "inv_window_idx");
            ggml_set_input(inv_window_idx);

            // mask for window attention
            window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_pos, n_pos);
            ggml_set_name(window_mask, "window_mask");
            ggml_set_input(window_mask);

            // inpL shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            inpL = ggml_reshape_2d(ctx0, inpL, n_embd * 4, n_patches_x * n_patches_y * batch_size / 4);
            inpL = ggml_get_rows(ctx0, inpL, inv_window_idx);
            inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_patches_x * n_patches_y, batch_size);
        }
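        // note: inv_window_idx shuffles the 2x2-merged token groups so that tokens
        // belonging to the same attention window sit next to each other; window_idx
        // (applied after the projector below) restores the original order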
        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            const bool full_attn = use_window_attn ? (il + 1) % n_wa_pattern == 0 : true;

            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "ln1", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.q_w, cur), layer.q_b);
                ggml_tensor * Kcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.k_w, cur), layer.k_b);
                ggml_tensor * Vcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.v_w, cur), layer.v_b);

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_patches);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_patches);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_patches);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                // apply M-RoPE
                Qcur = ggml_rope_multi(
                    ctx0, Qcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
                Kcur = ggml_rope_multi(
                    ctx0, Kcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);

                cb(Qcur, "Qcur_rope", il);
                cb(Kcur, "Kcur_rope", il);

                ggml_tensor * attn_mask = full_attn ? nullptr : window_mask;

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, attn_mask, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            // re-add the layer input, i.e., the residual
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states

            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);

            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
        }

        // multimodal projection
        ggml_tensor * embeddings = inpL;
        embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);

        embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

        // GELU activation
        embeddings = ggml_gelu(ctx0, embeddings);

        // Second linear layer
        embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);

        if (use_window_attn) {
            window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(window_idx, "window_idx");
            ggml_set_input(window_idx);

            // embeddings shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4);
            embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
            embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4, batch_size);
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_minicpmv() {
        const int batch_size = 1;

        GGML_ASSERT(model.class_embedding == nullptr);
        const int n_pos = n_patches;

        // position embeddings for the projector (not for ViT)
        int n_output_dim = clip_n_mmproj_embd(ctx);
        ggml_tensor * pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_output_dim, n_pos, batch_size);
        ggml_set_name(pos_embed, "pos_embed");
        ggml_set_input(pos_embed);

        // for selecting learned pos embd, used by ViT
        struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        ggml_tensor * learned_pos_embd = ggml_get_rows(ctx0, model.position_embeddings, positions);

        ggml_tensor * inp = build_inp();
        ggml_tensor * embeddings = build_vit(
                            inp, n_patches,
                            NORM_TYPE_NORMAL,
                            hparams.ffn_op,
                            learned_pos_embd,
                            nullptr);

        // resampler projector (it is just another transformer)

        ggml_tensor * q = model.mm_model_query;
        ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);

        // norm
        q = build_norm(q, model.mm_model_ln_q_w, model.mm_model_ln_q_b, NORM_TYPE_NORMAL, eps, -1);
        v = build_norm(v, model.mm_model_ln_kv_w, model.mm_model_ln_kv_b, NORM_TYPE_NORMAL, eps, -1);

        // k = v + pos_embed
        ggml_tensor * k = ggml_add(ctx0, v, pos_embed);

        // attention
        {
            int n_embd = clip_n_mmproj_embd(ctx);
            const int d_head = 128;
            int n_head = n_embd/d_head;
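            // the resampler head count is derived from the output dim with a fixed
            // d_head of 128; e.g. a 4096-dim projector embedding would give 32 heads
            // (illustrative numbers)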
            // Use actual config value if available, otherwise fall back to hardcoded values
            int num_query = ctx->model.hparams.minicpmv_query_num;

            ggml_tensor * Q = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q),
                model.mm_model_attn_q_b);
            ggml_tensor * K = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k),
                model.mm_model_attn_k_b);
            ggml_tensor * V = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v),
                model.mm_model_attn_v_b);

            Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_query);
            K = ggml_reshape_3d(ctx0, K, d_head, n_head, n_pos);
            V = ggml_reshape_3d(ctx0, V, d_head, n_head, n_pos);

            cb(Q, "resampler_Q", -1);
            cb(K, "resampler_K", -1);
            cb(V, "resampler_V", -1);

            embeddings = build_attn(
                model.mm_model_attn_o_w,
                model.mm_model_attn_o_b,
                Q, K, V, nullptr, kq_scale, -1);
            cb(embeddings, "resampler_attn_out", -1);
        }

        // layernorm
        embeddings = build_norm(embeddings, model.mm_model_ln_post_w, model.mm_model_ln_post_b, NORM_TYPE_NORMAL, eps, -1);

        // projection
        embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_internvl() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1;
        ggml_tensor * inp = build_inp();

        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // The larger models use a different ViT, which uses RMS norm instead of layer norm
        // ref: https://github.com/ggml-org/llama.cpp/pull/13443#issuecomment-2869786188
        norm_type norm_t = (hparams.n_embd == 3200 && hparams.n_layer == 45)
            ? NORM_TYPE_RMS     // 6B ViT (Used by InternVL 2.5/3 - 26B, 38B, 78B)
            : NORM_TYPE_NORMAL; // 300M ViT (Used by all smaller InternVL models)

        ggml_tensor * cur = build_vit(
                            inp, n_pos,
                            norm_t,
                            hparams.ffn_op,
                            model.position_embeddings,
                            nullptr);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
        {
            const int scale_factor = model.hparams.proj_scale_factor;
            const int bsz    = 1; // batch size, always 1 for now since we don't support batching
            const int height = n_patches_y;
            const int width  = n_patches_x;
            GGML_ASSERT(scale_factor > 0);
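            // pixel shuffle folds each scale_factor x scale_factor block of patches
            // into a single token; e.g. a 32x32 grid with scale_factor = 2 becomes a
            // 16x16 grid of tokens with 4 * n_embd channels each (illustrative numbers)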
            cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, height / scale_factor, width, bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_cont_4d(ctx0, cur,
                n_embd * scale_factor * scale_factor,
                height / scale_factor,
                width / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

            // flatten to 2D
            cur = ggml_cont_2d(ctx0, cur,
                n_embd * scale_factor * scale_factor,
                cur->ne[1] * cur->ne[2]);
        }

        // projector (always using GELU activation)
        {
            // projector LayerNorm uses pytorch's default eps = 1e-5
            // ref: https://huggingface.co/OpenGVLab/InternVL3-8B-Instruct/blob/a34d3e4e129a5856abfd6aa6de79776484caa14e/modeling_internvl_chat.py#L79
            cur = build_norm(cur, model.mm_0_w, model.mm_0_b, NORM_TYPE_NORMAL, 1e-5, -1);
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_1_b);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_3_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_3_b);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    ggml_cgraph * build_llama4() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1; // +1 for [CLS]

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        ggml_tensor * inp = build_inp_raw();

        // Llama4UnfoldConvolution
        {
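            // unfold + mat-mul is equivalent to a non-overlapping Conv2D: im2col lays
            // out each patch_size x patch_size x 3 patch as one column, and the mat-mul
            // with the patch embedding weights applies the linear projection per patch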
            ggml_tensor * kernel = ggml_reshape_4d(ctx0, model.patch_embeddings_0,
                                                   patch_size, patch_size, 3, n_embd);
            inp = ggml_im2col(ctx0, kernel, inp, patch_size, patch_size, 0, 0, 1, 1, true, inp->type);
            inp = ggml_mul_mat(ctx0, model.patch_embeddings_0, inp);
            inp = ggml_reshape_2d(ctx0, inp, n_embd, n_patches);
            cb(inp, "patch_conv", -1);
        }

        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // build ViT with 2D position embeddings
        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            // first half is X axis and second half is Y axis
            // ref: https://github.com/huggingface/transformers/blob/40a493c7ed4f19f08eadb0639cf26d49bfa5e180/src/transformers/models/llama4/modeling_llama4.py#L1312
            // ref: https://github.com/Blaizzy/mlx-vlm/blob/a57156aa87b33cca6e5ee6cfc14dd4ef8f611be6/mlx_vlm/models/llama4/vision.py#L441
            return build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
        };

        ggml_tensor * cur = build_vit(
                            inp, n_pos,
                            NORM_TYPE_NORMAL,
                            hparams.ffn_op,
                            model.position_embeddings,
                            add_pos);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
        // based on Llama4VisionPixelShuffleMLP
        // https://github.com/huggingface/transformers/blob/2932f318a20d9e54cc7aea052e040164d85de7d6/src/transformers/models/llama4/modeling_llama4.py#L1151
        {
            const int scale_factor = model.hparams.proj_scale_factor;
            const int bsz = 1; // batch size, always 1 for now since we don't support batching
            GGML_ASSERT(scale_factor > 0);
            GGML_ASSERT(n_patches_x == n_patches_y); // llama4 only supports square images
            cur = ggml_reshape_4d(ctx0, cur,
                n_embd * scale_factor,
                n_patches_x / scale_factor,
                n_patches_y,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_cont_4d(ctx0, cur,
                n_embd * scale_factor * scale_factor,
                n_patches_x / scale_factor,
                n_patches_y / scale_factor,
                bsz);
            //cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

            // flatten to 2D
            cur = ggml_cont_2d(ctx0, cur,
                n_embd * scale_factor * scale_factor,
                n_patches / scale_factor / scale_factor);
            cb(cur, "pixel_shuffle", -1);
        }

        // based on Llama4VisionMLP2 (always uses GELU activation, no bias)
        {
            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, cur);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, cur);
            cur = ggml_gelu(ctx0, cur);
            cb(cur, "adapter_mlp", -1);
        }

        // Llama4MultiModalProjector
        cur = ggml_mul_mat(ctx0, model.mm_model_proj, cur);
        cb(cur, "projected", -1);

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    ggml_cgraph * build_kimivl() {
        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        ggml_tensor * learned_pos_embd = resize_position_embeddings();

        // build ViT with 2D position embeddings
        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            // first half is X axis and second half is Y axis
            return build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
        };

        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                            inp, n_patches,
                            NORM_TYPE_NORMAL,
                            hparams.ffn_op,
                            learned_pos_embd,
                            add_pos);

        cb(cur, "vit_out", -1);

        {
            // patch_merger
            const int scale_factor = model.hparams.proj_scale_factor;
            cur = build_patch_merge_permute(cur, scale_factor);

            // projection norm
            int proj_inp_dim = cur->ne[0];
  927. cur = ggml_view_2d(ctx0, cur,
  928. n_embd, cur->ne[1] * scale_factor * scale_factor,
  929. ggml_row_size(cur->type, n_embd), 0);
  930. cur = ggml_norm(ctx0, cur, 1e-5); // default nn.LayerNorm
  931. cur = ggml_mul(ctx0, cur, model.mm_input_norm_w);
  932. cur = ggml_add(ctx0, cur, model.mm_input_norm_b);
  933. cur = ggml_view_2d(ctx0, cur,
  934. proj_inp_dim, cur->ne[1] / scale_factor / scale_factor,
  935. ggml_row_size(cur->type, proj_inp_dim), 0);
  936. cb(cur, "proj_inp_normed", -1);
  937. // projection mlp
  938. cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
  939. cur = ggml_add(ctx0, cur, model.mm_1_b);
  940. cur = ggml_gelu(ctx0, cur);
  941. cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
  942. cur = ggml_add(ctx0, cur, model.mm_2_b);
  943. cb(cur, "proj_out", -1);
  944. }
  945. // build the graph
  946. ggml_build_forward_expand(gf, cur);
  947. return gf;
  948. }
    // this graph is used by llava, granite and glm
    // due to having embedding_stack (used by granite), we cannot reuse build_vit
    ggml_cgraph * build_llava() {
        const int batch_size = 1;
        const int n_pos = n_patches + (model.class_embedding ? 1 : 0);
        GGML_ASSERT(n_patches_x == n_patches_y && "only square images supported");
        // Calculate the deepest feature layer based on hparams and projector type
        int max_feature_layer = n_layer;
        {
            // Get the index of the second to last layer; this is the default for models that have a llava projector
            int il_last = hparams.n_layer - 1;
            int deepest_feature_layer = -1;
            if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV || ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
                il_last += 1;
            }
            // If we set explicit vision feature layers, only go up to the deepest one
            // NOTE: only used by granite-vision models for now
            for (const auto & feature_layer : hparams.vision_feature_layer) {
                if (feature_layer > deepest_feature_layer) {
                    deepest_feature_layer = feature_layer;
                }
            }
            max_feature_layer = deepest_feature_layer < 0 ? il_last : deepest_feature_layer;
        }
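        // e.g. (illustrative): a granite-vision model with vision_feature_layer = {3, 7, 15, 26}
        // gives max_feature_layer = 26, so the loop below runs layers 0..25 and the features
        // at indices 3, 7, 15 and 26 are stacked at the end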
        ggml_tensor * inp = build_inp();
        // concat class_embeddings and patch_embeddings
        if (model.class_embedding) {
            inp = ggml_concat(ctx0, inp, model.class_embedding, 1);
        }
        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);
        inp = ggml_add(ctx0, inp, ggml_get_rows(ctx0, model.position_embeddings, positions));
        ggml_tensor * inpL = inp;
        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, NORM_TYPE_NORMAL, eps, -1);
            cb(inpL, "pre_ln", -1);
        }
        std::vector<ggml_tensor *> embedding_stack;
        const auto & vision_feature_layer = hparams.vision_feature_layer;
        // loop over layers
        for (int il = 0; il < max_feature_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states
            // If this is an embedding feature layer, save the output.
            // NOTE: 0 index here refers to the input to the encoder.
            if (vision_feature_layer.find(il) != vision_feature_layer.end()) {
                embedding_stack.push_back(cur);
            }
            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "layer_inp_normed", il);
            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }
                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }
                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                cur = build_attn(layer.o_w, layer.o_b,
                        Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }
            // re-add the layer input, i.e. the residual
            cur = ggml_add(ctx0, cur, inpL);
            inpL = cur; // inpL = residual, cur = hidden_states
            cb(cur, "ffn_inp", il);
            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "ffn_inp_normed", il);
            // ffn
            cur = build_ffn(cur,
                    layer.ff_up_w, layer.ff_up_b,
                    layer.ff_gate_w, layer.ff_gate_b,
                    layer.ff_down_w, layer.ff_down_b,
                    hparams.ffn_op, il);
            cb(cur, "ffn_out", il);
            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);
            inpL = cur;
        }
        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, NORM_TYPE_NORMAL, eps, -1);
        }
        ggml_tensor * embeddings = inpL;
        // process vision feature layers (used by granite)
        {
            // final layer is a vision feature layer
            if (vision_feature_layer.find(max_feature_layer) != vision_feature_layer.end()) {
                embedding_stack.push_back(inpL);
            }
            // If feature layers are explicitly set, stack them (if we have multiple)
            if (!embedding_stack.empty()) {
                embeddings = embedding_stack[0];
                for (size_t i = 1; i < embedding_stack.size(); i++) {
                    embeddings = ggml_concat(ctx0, embeddings, embedding_stack[i], 0);
                }
            }
        }
        // llava projector (also used by granite)
        if (ctx->model.hparams.has_llava_projector) {
            embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
            ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
            ggml_set_name(patches, "patches");
            ggml_set_input(patches);
            // shape [1, 576, 1024]
            // ne is whcn, ne = [1024, 576, 1, 1]
            embeddings = ggml_get_rows(ctx0, embeddings, patches);
            // print_tensor_info(embeddings, "embeddings");
            // llava projector
            if (ctx->proj_type() == PROJECTOR_TYPE_MLP) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
                embeddings = ggml_gelu(ctx0, embeddings);
                if (model.mm_2_w) {
                    embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
                    embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
                }
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_MLP_NORM) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
                // ggml_tensor_printf(embeddings, "mm_0_w", 0, true, false);
                // First LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),
                        model.mm_1_b);
                // GELU activation
                embeddings = ggml_gelu(ctx0, embeddings);
                // Second linear layer
                embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);
                // Second LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
                        model.mm_4_b);
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_LDP) {
                // MobileVLM projector
                int n_patch = 24;
                ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
                mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
                mlp_1 = ggml_gelu(ctx0, mlp_1);
                ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
                mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
                // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]
                // block 1
                ggml_tensor * block_1 = nullptr;
                {
                    // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
                    mlp_3 = ggml_permute(ctx0, mlp_3, 1, 0, 2, 3);
                    mlp_3 = ggml_cont_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
                    // stride = 1, padding = 1, bias is nullptr
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);
                    // layer norm
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);
                    // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);
                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
                    // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                    // residual
                    block_1 = ggml_add(ctx0, mlp_3, block_1);
                }
                // block_2
                {
                    // stride = 2
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
                    // layer norm
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
                    // not sure the parameters are right for global average pooling
                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);
                    // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);
                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
                    // block_1 shape = [1, 12*12, 2048], ne = [12*12, 2048, 1]
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
                    block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
                    // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
                }
                embeddings = block_1;
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_LDPV2) {
                int n_patch = 24;
                ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
                mlp_0 = ggml_gelu(ctx0, mlp_0);
                ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
                mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
                // mlp_2 ne = [2048, 576, 1, 1]
                // AVG Pool Layer 2*2, strides = 2
                mlp_2 = ggml_permute(ctx0, mlp_2, 1, 0, 2, 3);
                // mlp_2 ne = [576, 2048, 1, 1]
                mlp_2 = ggml_cont_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
                // mlp_2 ne = [24, 24, 2048, 1]
                mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
                // weight ne = [3, 3, 2048, 1]
                ggml_tensor * peg_0 = ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
                peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
                mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, mlp_2);
                peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
                embeddings = peg_0;
            }
            else {
                GGML_ABORT("fatal error");
            }
        }
        // glm projector
        else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
            size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
            embeddings = ggml_permute(ctx0, embeddings, 1, 0, 2, 3);
            embeddings = ggml_cont_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
            embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
            embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0]*embeddings->ne[1], embeddings->ne[2], batch_size);
            embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
            embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);
            // GLU
            {
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
                embeddings = ggml_gelu_inplace(ctx0, embeddings);
                ggml_tensor * x = embeddings;
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
                x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, x);
                embeddings = ggml_swiglu_split(ctx0, embeddings, x);
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
            }
            // arrangement of BOI/EOI token embeddings
            // note: these embeddings are not present in the text model, hence we cannot process them as text tokens
            // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
            {
                embeddings = ggml_concat(ctx0, model.mm_boi, embeddings, 1); // BOI
                embeddings = ggml_concat(ctx0, embeddings, model.mm_eoi, 1); // EOI
            }
        }
        else {
            GGML_ABORT("llava: unknown projector type");
        }
        // build the graph
        ggml_build_forward_expand(gf, embeddings);
        return gf;
    }
    // whisper encoder with custom projector
    ggml_cgraph * build_whisper_enc() {
        const int n_frames = img.nx;
        const int n_pos = n_frames / 2;
        GGML_ASSERT(model.position_embeddings->ne[1] >= n_pos);
        ggml_tensor * inp = build_inp_raw(1);
        // conv1d block
        {
            // convolution + gelu
            ggml_tensor * cur = ggml_conv_1d_ph(ctx0, model.conv1d_1_w, inp, 1, 1);
            cur = ggml_add(ctx0, cur, model.conv1d_1_b);
            cur = ggml_gelu_erf(ctx0, cur);
            cur = ggml_conv_1d_ph(ctx0, model.conv1d_2_w, cur, 2, 1);
            cur = ggml_add(ctx0, cur, model.conv1d_2_b);
            cur = ggml_gelu_erf(ctx0, cur);
            // transpose
            inp = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
            cb(inp, "after_conv1d", -1);
        }
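        // e.g. (illustrative): the first conv keeps the frame count, the second has stride 2,
        // so n_frames = 3000 mel frames become n_pos = 1500 positions of width n_embd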
        // sanity check (only check one layer, but it should be the same for all)
        GGML_ASSERT(model.layers[0].ln_1_w && model.layers[0].ln_1_b);
        GGML_ASSERT(model.layers[0].ln_2_w && model.layers[0].ln_2_b);
        GGML_ASSERT(model.layers[0].q_b);
        GGML_ASSERT(model.layers[0].v_b);
        GGML_ASSERT(!model.layers[0].k_b); // no bias for k
        GGML_ASSERT(model.post_ln_w && model.post_ln_b);
        ggml_tensor * pos_embd_selected = ggml_view_2d(
            ctx0, model.position_embeddings,
            model.position_embeddings->ne[0], n_pos,
            model.position_embeddings->nb[1], 0
        );
        ggml_tensor * cur = build_vit(
                inp, n_pos,
                NORM_TYPE_NORMAL,
                hparams.ffn_op,
                pos_embd_selected,
                nullptr);
        cb(cur, "after_transformer", -1);
        if (model.audio_has_stack_frames()) {
            // StackAudioFrames
            // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py
            int64_t stride = n_embd * hparams.proj_stack_factor;
            int64_t padded_len = GGML_PAD(ggml_nelements(cur), stride);
            int64_t pad = padded_len - ggml_nelements(cur);
            if (pad > 0) {
                cur = ggml_view_1d(ctx0, cur, ggml_nelements(cur), 0);
                cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
            }
            cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
                    ggml_row_size(cur->type, stride), 0);
            cb(cur, "after_stacked", -1);
        }
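        // e.g. (illustrative) with proj_stack_factor = 8: a [n_embd, n_pos] sequence is
        // zero-padded so that n_pos is a multiple of 8, then viewed as [8*n_embd, n_pos/8],
        // stacking 8 consecutive frames into one row before the projector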
        if (ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX) {
            // UltravoxProjector
            // pre-norm
            cur = ggml_rms_norm(ctx0, cur, 1e-6);
            cur = ggml_mul(ctx0, cur, model.mm_norm_pre_w);
            // ffn in
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            // swiglu
            // see SwiGLU in ultravox_model.py: it is the second half that is passed through silu, not the first half
            cur = ggml_swiglu_swapped(ctx0, cur);
            // mid-norm
            cur = ggml_rms_norm(ctx0, cur, 1e-6);
            cur = ggml_mul(ctx0, cur, model.mm_norm_mid_w);
            // ffn out
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
        } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) {
            // projector
            cur = ggml_mul_mat(ctx0, model.mm_fc_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_fc_b);
        } else if (ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL) {
            // projector
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_gelu_erf(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
        } else {
            GGML_ABORT("%s: unknown projector type", __func__);
        }
        cb(cur, "projected", -1);
        ggml_build_forward_expand(gf, cur);
        return gf;
    }
    // cogvlm vision encoder
    ggml_cgraph * build_cogvlm() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);
        const int n_pos = n_patches + 1; // +1 for [CLS]
        // build input and concatenate class embedding
        ggml_tensor * inp = build_inp();
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);
        inp = ggml_add(ctx0, inp, model.position_embeddings);
        cb(inp, "inp_pos", -1);
        ggml_tensor * inpL = inp;
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL;
            cur = ggml_mul_mat(ctx0, layer.qkv_w, cur);
            cur = ggml_add(ctx0, cur, layer.qkv_b);
            ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, d_head*sizeof(float),
                    cur->nb[1], 0);
            ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, d_head*sizeof(float),
                    cur->nb[1], n_embd * sizeof(float));
            ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, d_head*sizeof(float),
                    cur->nb[1], 2 * n_embd * sizeof(float));
            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);
            cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "attn_post_norm", il);
            cur = ggml_add(ctx0, cur, inpL);
            inpL = cur;
            cur = build_ffn(cur,
                    layer.ff_up_w, layer.ff_up_b,
                    layer.ff_gate_w, layer.ff_gate_b,
                    layer.ff_down_w, layer.ff_down_b,
                    hparams.ffn_op, il);
            cb(cur, "ffn_out", il);
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "ffn_post_norm", il);
            cur = ggml_add(ctx0, cur, inpL);
            cb(cur, "layer_out", il);
            inpL = cur;
        }
        // remove CLS token (like build_llama4 does)
        ggml_tensor * cur = ggml_view_2d(ctx0, inpL,
                n_embd, n_patches,
                ggml_row_size(inpL->type, n_embd), 0);
        // Multiply with mm_model_proj
        cur = ggml_mul_mat(ctx0, model.mm_model_proj, cur);
        // Apply layernorm, weight, bias
        cur = build_norm(cur, model.mm_post_fc_norm_w, model.mm_post_fc_norm_b, NORM_TYPE_NORMAL, 1e-5, -1);
        // Apply GELU
        cur = ggml_gelu_inplace(ctx0, cur);
        // Branch 1: multiply with mm_h_to_4h_w
        ggml_tensor * h_to_4h = ggml_mul_mat(ctx0, model.mm_h_to_4h_w, cur);
        // Branch 2: multiply with mm_gate_w
        ggml_tensor * gate = ggml_mul_mat(ctx0, model.mm_gate_w, cur);
        // Apply silu
        gate = ggml_swiglu_split(ctx0, gate, h_to_4h);
        // Apply mm_4h_to_h_w
        cur = ggml_mul_mat(ctx0, model.mm_4h_to_h_w, gate);
        // Concatenate with boi and eoi
        cur = ggml_concat(ctx0, model.mm_boi, cur, 1);
        cur = ggml_concat(ctx0, cur, model.mm_eoi, 1);
        // build the graph
        ggml_build_forward_expand(gf, cur);
        return gf;
    }
private:
    //
    // utility functions
    //
    void cb(ggml_tensor * cur0, const char * name, int il) const {
        if (ctx->debug_graph) {
            ggml_tensor * cur = ggml_cpy(ctx0, cur0, ggml_dup_tensor(ctx0, cur0));
            std::string cur_name = il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
            ggml_set_name(cur, cur_name.c_str());
            ggml_set_output(cur);
            ggml_build_forward_expand(gf, cur);
            ctx->debug_print_tensors.push_back(cur);
        }
    }
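    // usage example (illustrative): cb(cur, "attn_out", il) registers a debug copy of the
    // tensor named "attn_out_<il>" ("attn_out" when il < 0); it is a no-op unless
    // ctx->debug_graph is enabled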
    // siglip2 naflex
    ggml_tensor * resize_position_embeddings() {
        ggml_tensor * pos_embd = model.position_embeddings;
        GGML_ASSERT(pos_embd);
        const int height = img.ny / patch_size;
        const int width  = img.nx / patch_size;
        const uint32_t mode = GGML_SCALE_MODE_BILINEAR;
        const int n_per_side = (int)std::sqrt(pos_embd->ne[1]);
        if (height == n_per_side && width == n_per_side) {
            return pos_embd;
        }
        pos_embd = ggml_reshape_3d(ctx0, pos_embd, n_embd, n_per_side, n_per_side); // -> (n_embd, n_per_side, n_per_side)
        pos_embd = ggml_permute(ctx0, pos_embd, 2, 0, 1, 3);                        // -> (n_per_side, n_per_side, n_embd)
        pos_embd = ggml_interpolate(ctx0, pos_embd, width, height, n_embd, 1, mode); // -> (width, height, n_embd)
        pos_embd = ggml_permute(ctx0, pos_embd, 1, 2, 0, 3);                        // -> (n_embd, width, height)
        pos_embd = ggml_cont_2d(ctx0, pos_embd, n_embd, width * height);            // -> (n_embd, width * height)
        return pos_embd;
    }
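    // e.g. (illustrative): a 27x27 learned grid (729 positions) used with a 448x364 image and
    // patch_size = 14 is resized to 32x26, giving a [n_embd, 832] position embedding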
    // build vision transformer (ViT) cgraph
    // this function should cover most of the models
    // if your model has specific features, you should probably duplicate this function
    ggml_tensor * build_vit(
                ggml_tensor * inp,
                int64_t n_pos,
                norm_type norm_t,
                ffn_op_type ffn_t,
                ggml_tensor * learned_pos_embd,
                std::function<ggml_tensor *(ggml_tensor *, const clip_layer &)> add_pos
            ) {
        if (learned_pos_embd) {
            inp = ggml_add(ctx0, inp, learned_pos_embd);
            cb(inp, "pos_embed", -1);
        }
        ggml_tensor * inpL = inp;
        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
            cb(inpL, "pre_ln", -1);
        }
        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states
            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "layer_inp_normed", il);
            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }
                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }
                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }
                if (layer.q_norm) {
                    Qcur = build_norm(Qcur, layer.q_norm, NULL, norm_t, eps, il);
                    cb(Qcur, "Qcur_norm", il);
                }
                if (layer.k_norm) {
                    Kcur = build_norm(Kcur, layer.k_norm, NULL, norm_t, eps, il);
                    cb(Kcur, "Kcur_norm", il);
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                if (add_pos) {
                    Qcur = add_pos(Qcur, layer);
                    Kcur = add_pos(Kcur, layer);
                    cb(Qcur, "Qcur_pos", il);
                    cb(Kcur, "Kcur_pos", il);
                }
                cur = build_attn(layer.o_w, layer.o_b,
                        Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }
            if (layer.ls_1_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_1_w);
                cb(cur, "attn_out_scaled", il);
            }
            // re-add the layer input, i.e. the residual
            cur = ggml_add(ctx0, cur, inpL);
            inpL = cur; // inpL = residual, cur = hidden_states
            cb(cur, "ffn_inp", il);
            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);
            // ffn
            cur = build_ffn(cur,
                    layer.ff_up_w, layer.ff_up_b,
                    layer.ff_gate_w, layer.ff_gate_b,
                    layer.ff_down_w, layer.ff_down_b,
                    ffn_t, il);
            cb(cur, "ffn_out", il);
            if (layer.ls_2_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_2_w);
                cb(cur, "ffn_out_scaled", il);
            }
            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);
            inpL = cur;
        }
        if (ctx->model.audio_has_avgpool()) {
            ggml_tensor * cur = inpL;
            cur = ggml_transpose(ctx0, cur);
            cur = ggml_cont(ctx0, cur);
            cur = ggml_pool_1d(ctx0, cur, GGML_OP_POOL_AVG, 2, 2, 0);
            cur = ggml_transpose(ctx0, cur);
            cur = ggml_cont(ctx0, cur);
            inpL = cur;
        }
        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
        }
        return inpL;
    }
    // build the input after conv2d (inp_raw --> patches)
    // returns tensor with shape [n_embd, n_patches]
    ggml_tensor * build_inp() {
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
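        // conv output ne is [nx/patch_size, ny/patch_size, n_embd, 1];
        // flatten it and transpose to get [n_embd, n_patches]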
        inp = ggml_reshape_2d(ctx0, inp, n_patches, n_embd);
        inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
        if (model.patch_bias) {
            inp = ggml_add(ctx0, inp, model.patch_bias);
            cb(inp, "patch_bias", -1);
        }
        return inp;
    }
    ggml_tensor * build_inp_raw(int channels = 3) {
        ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, channels);
        ggml_set_name(inp_raw, "inp_raw");
        ggml_set_input(inp_raw);
        return inp_raw;
    }
    ggml_tensor * build_norm(
            ggml_tensor * cur,
            ggml_tensor * mw,
            ggml_tensor * mb,
            norm_type type,
            float norm_eps,
            int il) const {
        cur = type == NORM_TYPE_RMS
            ? ggml_rms_norm(ctx0, cur, norm_eps)
            : ggml_norm(ctx0, cur, norm_eps);
        if (mw || mb) {
            cb(cur, "norm", il);
        }
        if (mw) {
            cur = ggml_mul(ctx0, cur, mw);
            if (mb) {
                cb(cur, "norm_w", il);
            }
        }
        if (mb) {
            cur = ggml_add(ctx0, cur, mb);
        }
        return cur;
    }
    ggml_tensor * build_ffn(
            ggml_tensor * cur,
            ggml_tensor * up,
            ggml_tensor * up_b,
            ggml_tensor * gate,
            ggml_tensor * gate_b,
            ggml_tensor * down,
            ggml_tensor * down_b,
            ffn_op_type type_op,
            int il) const {
        ggml_tensor * tmp = up ? ggml_mul_mat(ctx0, up, cur) : cur;
        cb(tmp, "ffn_up", il);
        if (up_b) {
            tmp = ggml_add(ctx0, tmp, up_b);
            cb(tmp, "ffn_up_b", il);
        }
        if (gate) {
            cur = ggml_mul_mat(ctx0, gate, cur);
            cb(cur, "ffn_gate", il);
            if (gate_b) {
                cur = ggml_add(ctx0, cur, gate_b);
                cb(cur, "ffn_gate_b", il);
            }
        } else {
            cur = tmp;
        }
        // we only support parallel ffn for now
        switch (type_op) {
            case FFN_SILU:
                if (gate) {
                    cur = ggml_swiglu_split(ctx0, cur, tmp);
                    cb(cur, "ffn_swiglu", il);
                } else {
                    cur = ggml_silu(ctx0, cur);
                    cb(cur, "ffn_silu", il);
                } break;
            case FFN_GELU:
                if (gate) {
                    cur = ggml_geglu_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu", il);
                } else {
                    cur = ggml_gelu(ctx0, cur);
                    cb(cur, "ffn_gelu", il);
                } break;
            case FFN_GELU_ERF:
                if (gate) {
                    cur = ggml_geglu_erf_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu_erf", il);
                } else {
                    cur = ggml_gelu_erf(ctx0, cur);
                    cb(cur, "ffn_gelu_erf", il);
                } break;
            case FFN_GELU_QUICK:
                if (gate) {
                    cur = ggml_geglu_quick_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu_quick", il);
                } else {
                    cur = ggml_gelu_quick(ctx0, cur);
                    cb(cur, "ffn_gelu_quick", il);
                } break;
        }
        if (down) {
            cur = ggml_mul_mat(ctx0, down, cur);
        }
        if (down_b) {
            cb(cur, "ffn_down", il);
            cur = ggml_add(ctx0, cur, down_b);
        }
        return cur;
    }
    ggml_tensor * build_attn(
            ggml_tensor * wo,
            ggml_tensor * wo_b,
            ggml_tensor * q_cur,
            ggml_tensor * k_cur,
            ggml_tensor * v_cur,
            ggml_tensor * kq_mask,
            float kq_scale,
            int il) const {
        // these nodes are added to the graph together so that they are not reordered
        // by doing so, the number of splits in the graph is reduced
        ggml_build_forward_expand(gf, q_cur);
        ggml_build_forward_expand(gf, k_cur);
        ggml_build_forward_expand(gf, v_cur);
        ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
        //cb(q, "q", il);
        ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
        //cb(k, "k", il);
        ggml_tensor * v = ggml_permute(ctx0, v_cur, 1, 2, 0, 3);
        v = ggml_cont(ctx0, v);
        //cb(v, "v", il);
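        // resulting shapes (ggml ne order): q, k = [d_head, n_pos, n_head], v = [n_pos, d_head, n_head],
        // so below kq = [n_pos, n_pos, n_head] and kqv = [d_head, n_pos, n_head]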
        ggml_tensor * cur;
        // TODO @ngxson : support flash attention
        {
            const auto n_tokens = q->ne[1];
            const auto n_head   = q->ne[2];
            // const auto n_kv = k->ne[1]; // for flash attention
            ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
            // F32 may not be needed for vision encoders?
            // ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
            kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, 0.0f);
            ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
            cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
            cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
        }
        cb(cur, "kqv_out", il);
        if (wo) {
            cur = ggml_mul_mat(ctx0, wo, cur);
        }
        if (wo_b) {
            cur = ggml_add(ctx0, cur, wo_b);
        }
        return cur;
    }
    // implementation of the 2D RoPE without adding a new op in ggml
    // this is not efficient (it uses double the memory), but it works on all backends
    // TODO: there was a more efficient implementation which relied on ggml_view and ggml_rope_ext_inplace,
    //       but the inplace rope does not work well with non-contiguous tensors; we should fix that
    //       and revert to the original implementation in https://github.com/ggml-org/llama.cpp/pull/13065
    static ggml_tensor * build_rope_2d(
        ggml_context * ctx0,
        ggml_tensor * cur,
        ggml_tensor * pos_a, // first half
        ggml_tensor * pos_b, // second half
        const float freq_base,
        const bool interleave_freq
    ) {
        const int64_t n_dim  = cur->ne[0];
        const int64_t n_head = cur->ne[1];
        const int64_t n_pos  = cur->ne[2];
        // for example, if we have a cur tensor of shape (n_dim=8, n_head, n_pos)
        // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
        // first half of cur will use 1e-0, 1e-2 (even)
        // second half of cur will use 1e-1, 1e-3 (odd)
        // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
        // ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
        // then for the second half, we use freq_scale to shift the inv_freq
        // ^ why? replace (2i) with (2i+1) in the above equation
        const float freq_scale_odd = interleave_freq
            ? std::pow(freq_base, (float)-2/n_dim)
            : 1.0;
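        // in closed form (illustrative): inv_freq_i = freq_base^(-2i/n_dim); rotating only n_dim/2
        // dims yields the even set freq_base^(-2(2i)/n_dim), and multiplying by
        // freq_scale_odd = freq_base^(-2/n_dim) shifts it to the odd set freq_base^(-2(2i+1)/n_dim)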
        // first half
        ggml_tensor * first;
        {
            first = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                0);
            first = ggml_rope_ext(
                ctx0,
                first,
                pos_a,   // positions
                nullptr, // freq factors
                n_dim/2, // n_dims
                0, 0, freq_base,
                1.0f, 0.0f, 1.0f, 0.0f, 0.0f
            );
        }
        // second half
        ggml_tensor * second;
        {
            second = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                n_dim/2 * ggml_element_size(cur));
            second = ggml_rope_ext(
                ctx0,
                second,
                pos_b,   // positions
                nullptr, // freq factors
                n_dim/2, // n_dims
                0, 0, freq_base,
                freq_scale_odd,
                0.0f, 1.0f, 0.0f, 0.0f
            );
        }
        cur = ggml_concat(ctx0, first, second, 0);
        return cur;
    }
    // aka pixel_shuffle / pixel_unshuffle / patch_merger (Kimi-VL)
    // supports dynamic resolution
    ggml_tensor * build_patch_merge_permute(ggml_tensor * cur, int scale_factor) {
        GGML_ASSERT(scale_factor > 1);
        const int n_embd = cur->ne[0];
        int width  = img.nx / patch_size;
        int height = img.ny / patch_size;
        // pad width and height to factor
        const int64_t pad_width  = CLIP_ALIGN(width,  scale_factor) - width;
        const int64_t pad_height = CLIP_ALIGN(height, scale_factor) - height;
        cur = ggml_reshape_3d(ctx0, cur, n_embd, width, height);
        if (pad_width || pad_height) {
            cur = ggml_pad(ctx0, cur, 0, pad_width, pad_height, 0);
            width  += pad_width;
            height += pad_height;
        }
        // unshuffle h
        cur = ggml_reshape_3d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height);
        cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
        // unshuffle w
        cur = ggml_cont_3d(ctx0, cur, n_embd * scale_factor * scale_factor, height / scale_factor, width / scale_factor);
        cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
        cur = ggml_cont_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
        cb(cur, "pixel_shuffle", -1);
        return cur;
    }
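    // e.g. (illustrative) with width = height = 28 and scale_factor = 2:
    // [n_embd, 784] -> [2*n_embd, 14, 28] -> permute + cont -> [4*n_embd, 14, 14] -> [4*n_embd, 196]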
};
static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
    GGML_ASSERT(imgs.entries.size() == 1 && "n_batch > 1 is not supported");
    clip_graph graph(ctx, *imgs.entries[0]);
    ggml_cgraph * res;
    switch (ctx->proj_type()) {
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_LFM2:
            {
                res = graph.build_siglip();
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                res = graph.build_pixtral();
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            {
                res = graph.build_qwen2vl();
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                res = graph.build_minicpmv();
            } break;
        case PROJECTOR_TYPE_INTERNVL:
            {
                res = graph.build_internvl();
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                res = graph.build_llama4();
            } break;
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_QWEN2A:
            {
                res = graph.build_whisper_enc();
            } break;
        case PROJECTOR_TYPE_KIMIVL:
            {
                res = graph.build_kimivl();
            } break;
        case PROJECTOR_TYPE_COGVLM:
            {
                res = graph.build_cogvlm();
            } break;
        default:
            {
                res = graph.build_llava();
            } break;
    }
    return res;
}
struct clip_model_loader {
    ggml_context_ptr ctx_meta;
    gguf_context_ptr ctx_gguf;
    std::string fname;
    size_t model_size = 0; // in bytes
    bool has_vision = false;
    bool has_audio  = false;
    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_model
    clip_model_loader(const char * fname) : fname(fname) {
        struct ggml_context * meta = nullptr;
        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &meta,
        };
        ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
        if (!ctx_gguf.get()) {
            throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
        }
        ctx_meta.reset(meta);
        const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
        // print gguf info
        {
            std::string name;
            get_string(KEY_NAME, name, false);
            std::string description;
            get_string(KEY_DESCRIPTION, description, false);
            LOG_INF("%s: model name: %s\n", __func__, name.c_str());
            LOG_INF("%s: description: %s\n", __func__, description.c_str());
            LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx_gguf.get()));
            LOG_INF("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
            LOG_INF("%s: n_tensors: %d\n", __func__, n_tensors);
            LOG_INF("%s: n_kv: %d\n", __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
            LOG_INF("\n");
        }
        // modalities
        {
            get_bool(KEY_HAS_VISION_ENC, has_vision, false);
            get_bool(KEY_HAS_AUDIO_ENC, has_audio, false);
            if (has_vision) {
                LOG_INF("%s: has vision encoder\n", __func__);
            }
            if (has_audio) {
                LOG_INF("%s: has audio encoder\n", __func__);
            }
        }
        // tensors
        {
            for (int i = 0; i < n_tensors; ++i) {
                const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
                const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
                enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
                ggml_tensor * cur = ggml_get_tensor(meta, name);
                size_t tensor_size = ggml_nbytes(cur);
                model_size += tensor_size;
                LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
                        __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }
    void load_hparams(clip_model & model, clip_modality modality) {
        auto & hparams = model.hparams;
        std::string log_ffn_op; // for logging
        // sanity check
        if (modality == CLIP_MODALITY_VISION) {
            GGML_ASSERT(has_vision);
        } else if (modality == CLIP_MODALITY_AUDIO) {
            GGML_ASSERT(has_audio);
        }
        model.modality = modality;
        // projector type
        std::string proj_type;
        {
            // default key
            get_string(KEY_PROJ_TYPE, proj_type, false);
            // for models with mixed modalities
            if (proj_type.empty()) {
                if (modality == CLIP_MODALITY_VISION) {
                    get_string(KEY_VISION_PROJ_TYPE, proj_type, false);
                } else if (modality == CLIP_MODALITY_AUDIO) {
                    get_string(KEY_AUDIO_PROJ_TYPE, proj_type, false);
                } else {
                    GGML_ABORT("unknown modality");
                }
            }
            model.proj_type = clip_projector_type_from_string(proj_type);
            if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) {
                throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
            }
            // correct arch for multimodal models (legacy method)
            if (model.proj_type == PROJECTOR_TYPE_QWEN25O) {
                model.proj_type = modality == CLIP_MODALITY_VISION
                    ? PROJECTOR_TYPE_QWEN25VL
                    : PROJECTOR_TYPE_QWEN2A;
            }
        }
        const bool is_vision = model.modality == CLIP_MODALITY_VISION;
        const bool is_audio  = model.modality == CLIP_MODALITY_AUDIO;
        // other hparams
        {
            const char * prefix = is_vision ? "vision" : "audio";
            get_u32(string_format(KEY_N_EMBD, prefix), hparams.n_embd);
            get_u32(string_format(KEY_N_HEAD, prefix), hparams.n_head);
            get_u32(string_format(KEY_N_FF, prefix), hparams.n_ff);
            get_u32(string_format(KEY_N_BLOCK, prefix), hparams.n_layer);
            get_u32(string_format(KEY_PROJ_DIM, prefix), hparams.projection_dim);
            get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps);
            if (is_vision) {
                get_u32(KEY_IMAGE_SIZE, hparams.image_size);
                get_u32(KEY_PREPROC_IMAGE_SIZE, hparams.preproc_image_size, false);
                get_u32(KEY_PATCH_SIZE, hparams.patch_size);
                get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
                get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy
                get_u32(KEY_MINICPMV_QUERY_NUM, hparams.minicpmv_query_num, false);
                if (hparams.minicpmv_query_num == 0) {
                    // Fallback to hardcoded values for legacy models
                    if (hparams.minicpmv_version == 3) {
                        hparams.minicpmv_query_num = 64;
                    } else if (hparams.minicpmv_version == 4) {
                        hparams.minicpmv_query_num = 64;
                    } else if (hparams.minicpmv_version == 5) {
                        hparams.minicpmv_query_num = 64;
                    } else if (hparams.minicpmv_version == 6) {
                        hparams.minicpmv_query_num = 64;
                    } else {
                        hparams.minicpmv_query_num = 96;
                    }
                }
            } else if (is_audio) {
                get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins);
            } else {
                GGML_ASSERT(false && "unknown modality");
            }
            // for pinpoints, we need to convert them into a list of resolution candidates
            {
                std::vector<int> pinpoints;
                get_arr_int(KEY_IMAGE_GRID_PINPOINTS, pinpoints, false);
                if (!pinpoints.empty()) {
                    for (size_t i = 0; i < pinpoints.size(); i += 2) {
                        hparams.image_res_candidates.push_back({
                            pinpoints[i],
                            pinpoints[i+1],
                        });
                    }
                }
            }
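            // e.g. (illustrative): pinpoints = [336, 672, 672, 336] becomes the two
            // resolution candidates 336x672 and 672x336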
            // default warmup value
            hparams.warmup_image_size = hparams.image_size;
            hparams.has_llava_projector = model.proj_type == PROJECTOR_TYPE_MLP
                                       || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                                       || model.proj_type == PROJECTOR_TYPE_LDP
                                       || model.proj_type == PROJECTOR_TYPE_LDPV2;
            {
                bool use_gelu = false;
                bool use_silu = false;
                get_bool(KEY_USE_GELU, use_gelu, false);
                get_bool(KEY_USE_SILU, use_silu, false);
                if (use_gelu && use_silu) {
                    throw std::runtime_error(string_format("%s: both use_gelu and use_silu are set to true\n", __func__));
                }
                if (use_gelu) {
                    hparams.ffn_op = FFN_GELU;
                    log_ffn_op = "gelu";
                } else if (use_silu) {
                    hparams.ffn_op = FFN_SILU;
                    log_ffn_op = "silu";
                } else {
                    hparams.ffn_op = FFN_GELU_QUICK;
                    log_ffn_op = "gelu_quick";
                }
            }
            {
                std::string mm_patch_merge_type;
                get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
                if (mm_patch_merge_type == "spatial_unpad") {
                    hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
                }
            }
            if (is_vision) {
                int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
                int idx_std  = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
                GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
                GGML_ASSERT(idx_std >= 0  && "image_std not found");
                const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
                const float * std_data  = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
                for (int i = 0; i < 3; ++i) {
                    hparams.image_mean[i] = mean_data[i];
                    hparams.image_std[i]  = std_data[i];
                }
            }
            // Load the vision feature layer indices if they are explicitly provided;
            // if multiple vision feature layers are present, the values will be concatenated
            // to form the final visual features.
            // NOTE: gguf conversions should standardize the values of the vision feature layer to
            // be non-negative, since we use -1 to mark values as unset here.
            std::vector<int> vision_feature_layer;
            get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
            // convert std::vector to std::unordered_set
            for (auto & layer : vision_feature_layer) {
                hparams.vision_feature_layer.insert(layer);
            }
            // model-specific params
            switch (model.proj_type) {
                case PROJECTOR_TYPE_MINICPMV:
                    {
                        if (hparams.minicpmv_version == 0) {
                            hparams.minicpmv_version = 2; // default to 2 if not set
                        }
                    } break;
                case PROJECTOR_TYPE_IDEFICS3:
                case PROJECTOR_TYPE_LFM2:
                case PROJECTOR_TYPE_INTERNVL:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_PIXTRAL:
                case PROJECTOR_TYPE_LIGHTONOCR:
                    {
                        hparams.rope_theta = 10000.0f;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        // Mistral Small 2506 needs a 1024x1024 image size cap to prevent OOM
                        // ref: https://github.com/ggml-org/llama.cpp/issues/14310
                        hparams.image_size = 1024;
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.spatial_merge_size, false);
                    } break;
                case PROJECTOR_TYPE_KIMIVL:
                    {
                        hparams.rope_theta = 10000.0f;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_GEMMA3:
                    {
                        // default value (used by all model sizes in the gemma 3 family)
                        // number of patches for each **side** is reduced by a factor of 4
                        hparams.proj_scale_factor = 4;
                        // the test model (tinygemma3) has a different value, so we optionally read it
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
                    } break;
                case PROJECTOR_TYPE_QWEN2VL:
                    {
                        // max image size = sqrt(max_pixels) = 3584
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct/blob/main/preprocessor_config.json
                        // however, the model uses an unreasonable amount of memory past a size of 1024,
                        // so we force it to 1024; otherwise it's unusable
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
                        hparams.image_size = 1024;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                    } break;
                case PROJECTOR_TYPE_QWEN25VL:
                    {
                        // max image size = sqrt(max_pixels)
                        // https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/blob/main/preprocessor_config.json
                        // however, the model uses an unreasonable amount of memory past a size of 1024,
                        // so we force it to 1024; otherwise it's unusable
                        // ref: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct/discussions/10
                        hparams.image_size = 1024;
                        hparams.warmup_image_size = hparams.patch_size * 8;
                        get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern);
                    } break;
                case PROJECTOR_TYPE_LLAMA4:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor);
                        set_llava_uhd_res_candidates(model, 3);
                    } break;
                case PROJECTOR_TYPE_ULTRAVOX:
                case PROJECTOR_TYPE_QWEN2A:
                case PROJECTOR_TYPE_VOXTRAL:
                    {
                        bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX ||
                                             model.proj_type == PROJECTOR_TYPE_VOXTRAL;
                        get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
                        if (hparams.n_mel_bins != 128) {
                            throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
                        }
                        hparams.ffn_op = FFN_GELU_ERF;
                        log_ffn_op = "gelu_erf"; // temporary solution for logging
                    } break;
                default:
                    break;
            }
            LOG_INF("%s: projector: %s\n", __func__, proj_type.c_str());
            LOG_INF("%s: n_embd: %d\n", __func__, hparams.n_embd);
            LOG_INF("%s: n_head: %d\n", __func__, hparams.n_head);
            LOG_INF("%s: n_ff: %d\n", __func__, hparams.n_ff);
            LOG_INF("%s: n_layer: %d\n", __func__, hparams.n_layer);
            LOG_INF("%s: ffn_op: %s\n", __func__, log_ffn_op.c_str());
            LOG_INF("%s: projection_dim: %d\n", __func__, hparams.projection_dim);
            if (is_vision) {
                LOG_INF("\n--- vision hparams ---\n");
                LOG_INF("%s: image_size: %d\n", __func__, hparams.image_size);
                LOG_INF("%s: patch_size: %d\n", __func__, hparams.patch_size);
                LOG_INF("%s: has_llava_proj: %d\n", __func__, hparams.has_llava_projector);
                LOG_INF("%s: minicpmv_version: %d\n", __func__, hparams.minicpmv_version);
                LOG_INF("%s: proj_scale_factor: %d\n", __func__, hparams.proj_scale_factor);
                LOG_INF("%s: n_wa_pattern: %d\n", __func__, hparams.n_wa_pattern);
            } else if (is_audio) {
                LOG_INF("\n--- audio hparams ---\n");
                LOG_INF("%s: n_mel_bins: %d\n", __func__, hparams.n_mel_bins);
                LOG_INF("%s: proj_stack_factor: %d\n", __func__, hparams.proj_stack_factor);
            }
            LOG_INF("\n");
            LOG_INF("%s: model size: %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size: %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
        }
    }
    void load_tensors(clip_ctx & ctx_clip) {
        auto & model = ctx_clip.model;
        auto & hparams = model.hparams;
        std::map<std::string, size_t> tensor_offset;
        std::vector<ggml_tensor *> tensors_to_load;

        // TODO @ngxson : support both audio and video in the future
        const char * prefix = model.modality == CLIP_MODALITY_AUDIO ? "a" : "v";

        // get offsets
        for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
            const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
            tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
        }

        // create data context
        struct ggml_init_params params = {
            /*.mem_size =*/ static_cast<size_t>(gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc =*/ true,
        };
        ctx_clip.ctx_data.reset(ggml_init(params));
        if (!ctx_clip.ctx_data) {
            throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
        }

        // helper function
        auto get_tensor = [&](const std::string & name, bool required = true) {
            ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
            if (!cur && required) {
                throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
            }
            if (cur) {
                tensors_to_load.push_back(cur);
                // add tensors to context
                ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
                ggml_set_name(data_tensor, cur->name);
                cur = data_tensor;
            }
            return cur;
        };

        model.class_embedding     = get_tensor(TN_CLASS_EMBD, false);
        model.pre_ln_w            = get_tensor(string_format(TN_LN_PRE,  prefix, "weight"), false);
        model.pre_ln_b            = get_tensor(string_format(TN_LN_PRE,  prefix, "bias"),   false);
        model.post_ln_w           = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false);
        model.post_ln_b           = get_tensor(string_format(TN_LN_POST, prefix, "bias"),   false);
        model.patch_bias          = get_tensor(TN_PATCH_BIAS,   false);
        model.patch_embeddings_0  = get_tensor(TN_PATCH_EMBD,   false);
        model.patch_embeddings_1  = get_tensor(TN_PATCH_EMBD_1, false);
        model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false);

        // layers
        model.layers.resize(hparams.n_layer);
        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = model.layers[il];
            layer.k_w    = get_tensor(string_format(TN_ATTN_K,      prefix, il, "weight"), false);
            layer.q_w    = get_tensor(string_format(TN_ATTN_Q,      prefix, il, "weight"), false);
            layer.v_w    = get_tensor(string_format(TN_ATTN_V,      prefix, il, "weight"), false);
            layer.o_w    = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "weight"));
            layer.qkv_w  = get_tensor(string_format(TN_ATTN_QKV,    prefix, il, "weight"), false);
            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, prefix, il, "weight"), false);
            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, prefix, il, "weight"), false);
            layer.ln_1_w = get_tensor(string_format(TN_LN_1,        prefix, il, "weight"), false);
            layer.ln_2_w = get_tensor(string_format(TN_LN_2,        prefix, il, "weight"), false);
            layer.ls_1_w = get_tensor(string_format(TN_LS_1,        prefix, il, "weight"), false); // no bias
            layer.ls_2_w = get_tensor(string_format(TN_LS_2,        prefix, il, "weight"), false); // no bias
            layer.k_b    = get_tensor(string_format(TN_ATTN_K,      prefix, il, "bias"), false);
            layer.q_b    = get_tensor(string_format(TN_ATTN_Q,      prefix, il, "bias"), false);
            layer.v_b    = get_tensor(string_format(TN_ATTN_V,      prefix, il, "bias"), false);
            layer.o_b    = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "bias"), false);
            layer.qkv_b  = get_tensor(string_format(TN_ATTN_QKV,    prefix, il, "bias"), false);
            layer.ln_1_b = get_tensor(string_format(TN_LN_1,        prefix, il, "bias"), false);
            layer.ln_2_b = get_tensor(string_format(TN_LN_2,        prefix, il, "bias"), false);

            // ffn
            layer.ff_up_w   = get_tensor(string_format(TN_FFN_UP,   prefix, il, "weight"));
            layer.ff_up_b   = get_tensor(string_format(TN_FFN_UP,   prefix, il, "bias"),   false);
            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, prefix, il, "weight"), false);
            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, prefix, il, "bias"),   false);
            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "weight"));
            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "bias"),   false);
            // some models were exported with legacy (incorrect) naming, which is quite messy; fix it up here
            // note: Qwen models converted with the old surgery script have n_ff = 0, so we cannot use n_ff for this check!
            bool is_ffn_swapped = (
                    // only old models need this fix
                    model.proj_type == PROJECTOR_TYPE_MLP
                    || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                    || model.proj_type == PROJECTOR_TYPE_LDP
                    || model.proj_type == PROJECTOR_TYPE_LDPV2
                    || model.proj_type == PROJECTOR_TYPE_QWEN2VL
                    || model.proj_type == PROJECTOR_TYPE_QWEN25VL
                    || model.proj_type == PROJECTOR_TYPE_GLM_EDGE
                    || model.proj_type == PROJECTOR_TYPE_GEMMA3
                    || model.proj_type == PROJECTOR_TYPE_IDEFICS3
                    || model.proj_type == PROJECTOR_TYPE_MINICPMV
                ) && layer.ff_up_w && layer.ff_down_w && layer.ff_down_w->ne[0] == hparams.n_embd;
            if (is_ffn_swapped) {
                // swap up and down weights
                ggml_tensor * tmp = layer.ff_up_w;
                layer.ff_up_w = layer.ff_down_w;
                layer.ff_down_w = tmp;
                // swap up and down biases
                tmp = layer.ff_up_b;
                layer.ff_up_b = layer.ff_down_b;
                layer.ff_down_b = tmp;
                if (il == 0) {
                    LOG_WRN("%s: ffn up/down are swapped\n", __func__);
                }
            }
        }
        switch (model.proj_type) {
            case PROJECTOR_TYPE_MLP:
            case PROJECTOR_TYPE_MLP_NORM:
                {
                    // LLaVA projection
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"),   false);
                    // Yi-type llava
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"),   false);
                    // missing in Yi-type llava
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"),   false);
                    // Yi-type llava
                    model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
                    model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"),   false);
                    model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
                    model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"),   false);
                    if (model.mm_3_w) {
                        // TODO: this is a hack to support Yi-type llava
                        model.proj_type = PROJECTOR_TYPE_MLP_NORM;
                    }
                    model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
                } break;
            case PROJECTOR_TYPE_LDP:
                {
                    // MobileVLM projection
                    model.mm_model_mlp_1_w               = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_1_b               = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_model_mlp_3_w               = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_model_mlp_3_b               = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                    model.mm_model_block_1_block_0_0_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
                    model.mm_model_block_1_block_0_1_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
                    model.mm_model_block_1_block_0_1_b   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
                    model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
                    model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
                    model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
                    model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
                    model.mm_model_block_1_block_2_0_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
                    model.mm_model_block_1_block_2_1_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
                    model.mm_model_block_1_block_2_1_b   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
                    model.mm_model_block_2_block_0_0_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
                    model.mm_model_block_2_block_0_1_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
                    model.mm_model_block_2_block_0_1_b   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
                    model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
                    model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
                    model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
                    model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
                    model.mm_model_block_2_block_2_0_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
                    model.mm_model_block_2_block_2_1_w   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
                    model.mm_model_block_2_block_2_1_b   = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
                } break;
            case PROJECTOR_TYPE_LDPV2:
                {
                    // MobileVLM v2 projection
                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                    model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
                    model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
                    model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
                } break;
            case PROJECTOR_TYPE_MINICPMV:
                {
                    // model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
                    model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
                    model.mm_model_query       = get_tensor(TN_MINICPMV_QUERY);
                    model.mm_model_proj        = get_tensor(TN_MINICPMV_PROJ);
                    model.mm_model_kv_proj     = get_tensor(TN_MINICPMV_KV_PROJ);
                    model.mm_model_attn_q_w    = get_tensor(string_format(TN_MINICPMV_ATTN, "q",   "weight"));
                    model.mm_model_attn_k_w    = get_tensor(string_format(TN_MINICPMV_ATTN, "k",   "weight"));
                    model.mm_model_attn_v_w    = get_tensor(string_format(TN_MINICPMV_ATTN, "v",   "weight"));
                    model.mm_model_attn_q_b    = get_tensor(string_format(TN_MINICPMV_ATTN, "q",   "bias"));
                    model.mm_model_attn_k_b    = get_tensor(string_format(TN_MINICPMV_ATTN, "k",   "bias"));
                    model.mm_model_attn_v_b    = get_tensor(string_format(TN_MINICPMV_ATTN, "v",   "bias"));
                    model.mm_model_attn_o_w    = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
                    model.mm_model_attn_o_b    = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
                    model.mm_model_ln_q_w      = get_tensor(string_format(TN_MINICPMV_LN, "q",    "weight"));
                    model.mm_model_ln_q_b      = get_tensor(string_format(TN_MINICPMV_LN, "q",    "bias"));
                    model.mm_model_ln_kv_w     = get_tensor(string_format(TN_MINICPMV_LN, "kv",   "weight"));
                    model.mm_model_ln_kv_b     = get_tensor(string_format(TN_MINICPMV_LN, "kv",   "bias"));
                    model.mm_model_ln_post_w   = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
                    model.mm_model_ln_post_b   = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
                } break;
            case PROJECTOR_TYPE_GLM_EDGE:
                {
                    model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                    model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
                    model.mm_model_mlp_0_w        = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR,   "weight"));
                    model.mm_model_ln_q_w         = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,   "weight"));
                    model.mm_model_ln_q_b         = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,   "bias"));
                    model.mm_model_mlp_1_w        = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
                    model.mm_model_mlp_2_w        = get_tensor(string_format(TN_GLM_ADAPTER_GATE,     "weight"));
                    model.mm_model_mlp_3_w        = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
                    model.mm_boi                  = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
                    model.mm_eoi                  = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2VL:
            case PROJECTOR_TYPE_QWEN25VL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_GEMMA3:
                {
                    model.mm_input_proj_w    = get_tensor(TN_MM_INP_PROJ);
                    model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
                } break;
            case PROJECTOR_TYPE_IDEFICS3:
                {
                    model.projection = get_tensor(TN_MM_PROJECTOR);
                } break;
            case PROJECTOR_TYPE_LFM2:
            case PROJECTOR_TYPE_KIMIVL:
                {
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM);
                    model.mm_input_norm_b = get_tensor(TN_MM_INP_NORM_B);
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_PIXTRAL:
                {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // [IMG_BREAK] token embedding
                    model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
                    // for mistral small 3.1
                    model.mm_input_norm_w   = get_tensor(TN_MM_INP_NORM,     false);
                    model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
                } break;
            case PROJECTOR_TYPE_LIGHTONOCR:
                {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    model.mm_input_norm_w   = get_tensor(TN_MM_INP_NORM,     false);
                    model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
                } break;
            case PROJECTOR_TYPE_ULTRAVOX:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
                    model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2A:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight"));
                    model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias"));
                } break;
            case PROJECTOR_TYPE_VOXTRAL:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                } break;
            case PROJECTOR_TYPE_INTERNVL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                } break;
            case PROJECTOR_TYPE_LLAMA4:
                {
                    model.mm_model_proj    = get_tensor(TN_MM_PROJECTOR);
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                } break;
            case PROJECTOR_TYPE_COGVLM:
                {
                    model.mm_model_proj     = get_tensor(TN_MM_PROJECTOR);
                    model.mm_post_fc_norm_w = get_tensor(string_format(TN_MM_POST_FC_NORM, "weight"));
                    model.mm_post_fc_norm_b = get_tensor(string_format(TN_MM_POST_FC_NORM, "bias"));
                    model.mm_h_to_4h_w      = get_tensor(string_format(TN_MM_H_TO_4H, "weight"));
                    model.mm_gate_w         = get_tensor(string_format(TN_MM_GATE,    "weight"));
                    model.mm_4h_to_h_w      = get_tensor(string_format(TN_MM_4H_TO_H, "weight"));
                    model.mm_boi            = get_tensor(TN_TOK_BOI);
                    model.mm_eoi            = get_tensor(TN_TOK_EOI);
                } break;
            default:
                GGML_ASSERT(false && "unknown projector type");
        }
        // load data
        {
            std::vector<uint8_t> read_buf;

            auto fin = std::ifstream(fname, std::ios::binary);
            if (!fin) {
                throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
            }

            // alloc memory and offload data
            ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
            ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
            ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
            for (auto & t : tensors_to_load) {
                ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
                const size_t offset = tensor_offset[t->name];
                fin.seekg(offset, std::ios::beg);
                if (!fin) {
                    throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
                }
                size_t num_bytes = ggml_nbytes(cur);
                if (ggml_backend_buft_is_host(buft)) {
                    // for the CPU and Metal backend, we can read directly into the tensor
                    fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
                } else {
                    // read into a temporary buffer first, then copy to device memory
                    read_buf.resize(num_bytes);
                    fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
                    ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
                }
            }
            fin.close();

            LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
        }
    }
    void alloc_compute_meta(clip_ctx & ctx_clip) {
        const auto & hparams = ctx_clip.model.hparams;
        ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());

        // create a fake batch
        clip_image_f32_batch batch;
        clip_image_f32_ptr img(clip_image_f32_init());
        if (ctx_clip.model.modality == CLIP_MODALITY_VISION) {
            img->nx = hparams.warmup_image_size;
            img->ny = hparams.warmup_image_size;
        } else {
            img->nx = hparams.warmup_audio_size;
            img->ny = hparams.n_mel_bins;
        }
        batch.entries.push_back(std::move(img));

        ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
        ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);

        for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
            ggml_backend_t             backend = ctx_clip.backend_ptrs[i];
            ggml_backend_buffer_type_t buft    = ctx_clip.backend_buft[i];
            size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
            if (size > 1) {
                LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                        ggml_backend_buft_name(buft),
                        size / 1024.0 / 1024.0);
            }
        }
    }
    void get_bool(const std::string & key, bool & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_bool(ctx_gguf.get(), i);
    }

    void get_i32(const std::string & key, int & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_i32(ctx_gguf.get(), i);
    }

    void get_u32(const std::string & key, int & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_u32(ctx_gguf.get(), i);
    }

    void get_f32(const std::string & key, float & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = gguf_get_val_f32(ctx_gguf.get(), i);
    }

    void get_string(const std::string & key, std::string & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
    }

    void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) throw std::runtime_error("Key not found: " + key);
            return;
        }
        int n = gguf_get_arr_n(ctx_gguf.get(), i);
        output.resize(n);
        const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
        for (int j = 0; j < n; ++j) {
            output[j] = values[j];
        }
    }
    void set_llava_uhd_res_candidates(clip_model & model, const int max_patches_per_side) {
        auto & hparams = model.hparams;
        for (int x = 1; x <= max_patches_per_side; x++) {
            for (int y = 1; y <= max_patches_per_side; y++) {
                if (x == 1 && y == 1) {
                    continue; // skip the 1x1 grid (just the base image_size)
                }
                hparams.image_res_candidates.push_back(clip_image_size{
                    x*hparams.image_size,
                    y*hparams.image_size,
                });
            }
        }
    }
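    // e.g. (illustrative): with image_size = 336 and max_patches_per_side = 2, the loops above
    // yield the candidates 336x672, 672x336 and 672x672; the 1x1 case (336x336) is skipped.
    // select_best_resolution() later picks the best fit among these for a given input image.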
};

struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) {
    g_logger_state.verbosity_thold = ctx_params.verbosity;
    clip_ctx * ctx_vision = nullptr;
    clip_ctx * ctx_audio  = nullptr;

    try {
        clip_model_loader loader(fname);

        if (loader.has_vision) {
            ctx_vision = new clip_ctx(ctx_params);
            loader.load_hparams(ctx_vision->model, CLIP_MODALITY_VISION);
            loader.load_tensors(*ctx_vision);
            loader.alloc_compute_meta(*ctx_vision);
        }

        if (loader.has_audio) {
            ctx_audio = new clip_ctx(ctx_params);
            loader.load_hparams(ctx_audio->model, CLIP_MODALITY_AUDIO);
            loader.load_tensors(*ctx_audio);
            loader.alloc_compute_meta(*ctx_audio);
        }

    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
        if (ctx_vision) {
            delete ctx_vision;
        }
        if (ctx_audio) {
            delete ctx_audio;
        }
        return {nullptr, nullptr};
    }

    return {ctx_vision, ctx_audio};
}
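// minimal usage sketch (illustrative only; the clip_init_result field names are assumed from
// the public header, not shown here). A single GGUF mmproj file may yield a vision context,
// an audio context, or both, and each must eventually be released with clip_free():
//
//   clip_init_result res = clip_init("mmproj.gguf", params);
//   if (res.ctx_v) { /* encode images with the vision context */ }
//   if (res.ctx_a) { /* encode audio with the audio context  */ }
//   clip_free(res.ctx_v); // clip_free() is a no-op on nullptr
//   clip_free(res.ctx_a);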
struct clip_image_size * clip_image_size_init() {
    struct clip_image_size * load_image_size = new struct clip_image_size();
    load_image_size->width  = 448;
    load_image_size->height = 448;
    return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
    return new clip_image_u8();
}

struct clip_image_f32 * clip_image_f32_init() {
    return new clip_image_f32();
}

struct clip_image_f32_batch * clip_image_f32_batch_init() {
    return new clip_image_f32_batch();
}

unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
    if (nx) *nx = img->nx;
    if (ny) *ny = img->ny;
    return img->buf.data();
}

void clip_image_size_free(struct clip_image_size * load_image_size) {
    if (load_image_size == nullptr) {
        return;
    }
    delete load_image_size;
}
void clip_image_u8_free(struct clip_image_u8 * img) { if (img) delete img; }
void clip_image_f32_free(struct clip_image_f32 * img) { if (img) delete img; }
void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { if (batch) delete batch; }
void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { if (batch) delete batch; }

size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
    return batch->entries.size();
}

size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->nx;
}

size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->ny;
}

clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return nullptr;
    }
    return batch->entries[idx].get();
}
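// note: rgb_pixels must point at exactly 3*nx*ny bytes of tightly packed RGB data
// (no row stride or padding), since the copy below reads img->buf.size() bytes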
void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
    img->nx = nx;
    img->ny = ny;
    img->buf.resize(3 * nx * ny);
    memcpy(img->buf.data(), rgb_pixels, img->buf.size());
}
// Normalize image to float32. Careful with PyTorch's .to(model.device, dtype=torch.float16):
// the 32 -> 16 -> 32 round trip sometimes reduces precision, sometimes not.
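// e.g. (illustrative values): with mean[c] = 0.5 and std[c] = 0.5, a byte of 255 maps to
// (255/255 - 0.5) / 0.5 = 1.0 and a byte of 0 maps to -1.0; the actual per-channel
// mean/std come from the model hparams (image_mean / image_std).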
static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(src.buf.size());

    // TODO @ngxson : seems like this could be done more efficiently on cgraph
    for (size_t i = 0; i < src.buf.size(); ++i) {
        int c = i % 3; // rgb
        dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
    }
}
// set of tools to manipulate images
// in the future, we can have HW acceleration by allowing this struct to access 3rd-party libs like ImageMagick or OpenCV
struct image_manipulation {
    // Bilinear resize function
    static void bilinear_resize(const clip_image_u8 & src, clip_image_u8 & dst, int target_width, int target_height) {
        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float x_ratio = static_cast<float>(src.nx - 1) / target_width;
        float y_ratio = static_cast<float>(src.ny - 1) / target_height;

        for (int y = 0; y < target_height; y++) {
            for (int x = 0; x < target_width; x++) {
                float px = x_ratio * x;
                float py = y_ratio * y;
                int x_floor = static_cast<int>(px);
                int y_floor = static_cast<int>(py);
                float x_lerp = px - x_floor;
                float y_lerp = py - y_floor;

                for (int c = 0; c < 3; c++) {
                    float top = lerp(
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    float bottom = lerp(
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
                }
            }
        }
    }
    // Bicubic resize function
    // part of the image will be cropped if the aspect ratio is different
    static bool bicubic_resize(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
        const int nx = img.nx;
        const int ny = img.ny;

        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float Cc;
        float C[5] = {};
        float d0, d2, d3, a0, a1, a2, a3;
        int i, j, k, jj;
        int x, y;
        float dx, dy;
        float tx, ty;

        tx = (float)nx / (float)target_width;
        ty = (float)ny / (float)target_height;

        // Bicubic interpolation; adapted from ViT.cpp, inspired by:
        // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
        // -> https://en.wikipedia.org/wiki/Bicubic_interpolation
        for (i = 0; i < target_height; i++) {
            for (j = 0; j < target_width; j++) {
                x = (int)(tx * j);
                y = (int)(ty * i);

                dx = tx * j - x;
                dy = ty * i - y;

                for (k = 0; k < 3; k++) {
                    for (jj = 0; jj <= 3; jj++) {
                        d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];

                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                        C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;

                        d0 = C[0] - C[1];
                        d2 = C[2] - C[1];
                        d3 = C[3] - C[1];
                        a0 = C[1];
                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
                        Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;

                        const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
                        dst.buf[(i * target_width + j) * 3 + k] = Cc2;
                    }
                }
            }
        }

        return true;
    }
    // llava-1.6 style resize_and_pad:
    // if the aspect ratio does not match the target exactly, the remainder is filled with pad_color
    // pad_color is an RGB triple; the default is black
    static void resize_and_pad_image(const clip_image_u8 & image, clip_image_u8 & dst, const clip_image_size & target_resolution, std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
        int target_width  = target_resolution.width;
        int target_height = target_resolution.height;

        float scale_w = static_cast<float>(target_width)  / image.nx;
        float scale_h = static_cast<float>(target_height) / image.ny;

        int new_width, new_height;
        if (scale_w < scale_h) {
            new_width  = target_width;
            new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
        } else {
            new_height = target_height;
            new_width  = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
        }

        clip_image_u8 resized_image;
        bicubic_resize(image, resized_image, new_width, new_height);

        clip_image_u8 padded_image;
        padded_image.nx = target_width;
        padded_image.ny = target_height;
        padded_image.buf.resize(3 * target_width * target_height);

        // Fill the padded image with the pad color
        for (size_t i = 0; i < padded_image.buf.size(); i += 3) {
            padded_image.buf[i]     = pad_color[0];
            padded_image.buf[i + 1] = pad_color[1];
            padded_image.buf[i + 2] = pad_color[2];
        }

        // Calculate padding offsets
        int pad_x = (target_width  - new_width)  / 2;
        int pad_y = (target_height - new_height) / 2;

        // Copy the resized image into the center of the padded buffer
        for (int y = 0; y < new_height; ++y) {
            for (int x = 0; x < new_width; ++x) {
                for (int c = 0; c < 3; ++c) {
                    padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
                }
            }
        }
        dst = std::move(padded_image);
    }
    static void crop_image(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
        dst.nx = w;
        dst.ny = h;
        dst.buf.resize(3 * w * h);

        for (int i = 0; i < h; ++i) {
            for (int j = 0; j < w; ++j) {
                int src_idx = 3 * ((y + i)*image.nx + (x + j));
                int dst_idx = 3 * (i*w + j);
                dst.buf[dst_idx]     = image.buf[src_idx];
                dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
            }
        }
    }
    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size will be aligned to a multiple of align_size
    // the image is scaled so that the longer side equals max_dimension (before alignment)
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int max_dimension) {
        if (inp_size.width <= 0 || inp_size.height <= 0 || align_size <= 0 || max_dimension <= 0) {
            return {0, 0};
        }

        float scale = std::min(static_cast<float>(max_dimension) / inp_size.width,
                               static_cast<float>(max_dimension) / inp_size.height);

        float target_width_f  = static_cast<float>(inp_size.width)  * scale;
        float target_height_f = static_cast<float>(inp_size.height) * scale;

        int aligned_width  = CLIP_ALIGN((int)target_width_f,  align_size);
        int aligned_height = CLIP_ALIGN((int)target_height_f, align_size);

        return {aligned_width, aligned_height};
    }
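    // e.g. (illustrative, assuming CLIP_ALIGN rounds up to a multiple of align_size):
    // inp_size = 1000x500, align_size = 32, max_dimension = 512
    //   -> scale = min(512/1000, 512/500) = 0.512, target = 512.0 x 256.0
    //   -> aligned result = 512 x 256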
private:
    static inline int clip(int x, int lower, int upper) {
        return std::max(lower, std::min(x, upper));
    }

    // Linear interpolation between two points
    static inline float lerp(float s, float e, float t) {
        return s + (e - s) * t;
    }
};
/**
 * implementation of LLaVA-UHD:
 *  - https://arxiv.org/pdf/2403.11703
 *  - https://github.com/thunlp/LLaVA-UHD
 *  - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
 *
 * overview:
 *   - an image always has a single overview (downscaled image)
 *   - an image can have zero or more slices, depending on the image size
 *   - each slice can then be considered as a separate image
 *
 * for example:
 *
 * [overview] --> [slice 1] --> [slice 2]
 *                    |             |
 *                    +--> [slice 3] --> [slice 4]
 */
struct llava_uhd {
    struct slice_coordinates {
        int x;
        int y;
        clip_image_size size;
    };

    struct slice_instructions {
        clip_image_size overview_size; // size of the downscaled image
        clip_image_size refined_size;  // size of the image right before slicing (must be a multiple of the slice size)
        clip_image_size grid_size;     // grid_size.width * grid_size.height = number of slices
        std::vector<slice_coordinates> slices;
        bool padding_refined = false;  // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
    };
    static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
        slice_instructions res;
        const int patch_size      = clip_get_patch_size(ctx);
        const int slice_size      = clip_get_image_size(ctx);
        const int original_width  = original_size.width;
        const int original_height = original_size.height;

        const bool has_slices    = original_size.width > slice_size || original_size.height > slice_size;
        const bool has_pinpoints = !ctx->model.hparams.image_res_candidates.empty();

        if (!has_slices) {
            // skip slicing logic
            res.overview_size = clip_image_size{slice_size, slice_size};
            res.refined_size  = clip_image_size{0, 0};
            res.grid_size     = clip_image_size{0, 0};
            return res;
        }

        if (has_pinpoints) {
            // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
            auto refine_size = llava_uhd::select_best_resolution(
                original_size,
                ctx->model.hparams.image_res_candidates);
            res.overview_size   = clip_image_size{slice_size, slice_size};
            res.refined_size    = refine_size;
            res.grid_size       = clip_image_size{0, 0};
            res.padding_refined = true;

            LOG_DBG("%s: using pinpoints for slicing\n", __func__);
            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n",
                    __func__, original_width, original_height,
                    res.overview_size.width, res.overview_size.height,
                    res.refined_size.width,  res.refined_size.height);

            for (int y = 0; y < refine_size.height; y += slice_size) {
                for (int x = 0; x < refine_size.width; x += slice_size) {
                    slice_coordinates slice;
                    slice.x = x;
                    slice.y = y;
                    slice.size.width  = std::min(slice_size, refine_size.width  - x);
                    slice.size.height = std::min(slice_size, refine_size.height - y);
                    res.slices.push_back(slice);
                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
                            __func__, (int)res.slices.size() - 1,
                            slice.x, slice.y, slice.size.width, slice.size.height);
                }
            }

            res.grid_size.height = refine_size.height / slice_size;
            res.grid_size.width  = refine_size.width  / slice_size;
            LOG_DBG("%s: grid size: %d x %d\n", __func__, res.grid_size.width, res.grid_size.height);

            return res;
        }
        // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)
        auto best_size    = get_best_resize(original_size, slice_size, patch_size, !has_slices);
        res.overview_size = best_size;

        {
            const int max_slice_nums = 9; // TODO: this is only used by minicpmv, maybe remove it
            const float log_ratio = log((float)original_width / original_height);
            const float ratio     = (float)original_width * original_height / (slice_size * slice_size);
            const int multiple    = fmin(ceil(ratio), max_slice_nums);

            auto best_grid   = get_best_grid(max_slice_nums, multiple, log_ratio);
            auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
            res.grid_size    = best_grid;
            res.refined_size = refine_size;

            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d, grid size: %d x %d\n",
                    __func__, original_width, original_height,
                    res.overview_size.width, res.overview_size.height,
                    res.refined_size.width,  res.refined_size.height,
                    res.grid_size.width,     res.grid_size.height);

            int width  = refine_size.width;
            int height = refine_size.height;
            int grid_x = int(width  / best_grid.width);
            int grid_y = int(height / best_grid.height);

            for (int patches_y = 0, ic = 0;
                 patches_y < refine_size.height && ic < best_grid.height;
                 patches_y += grid_y, ic += 1) {
                for (int patches_x = 0, jc = 0;
                     patches_x < refine_size.width && jc < best_grid.width;
                     patches_x += grid_x, jc += 1) {
                    slice_coordinates slice;
                    slice.x = patches_x;
                    slice.y = patches_y;
                    slice.size.width  = grid_x;
                    slice.size.height = grid_y;
                    res.slices.push_back(slice);
                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
                            __func__, (int)res.slices.size() - 1,
                            slice.x, slice.y, slice.size.width, slice.size.height);
                }
            }
        }

        return res;
    }
    static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
        std::vector<clip_image_u8_ptr> output;

        // resize to overview size
        clip_image_u8_ptr resized_img(clip_image_u8_init());
        image_manipulation::resize_and_pad_image(*img, *resized_img, inst.overview_size);
        output.push_back(std::move(resized_img));
        if (inst.slices.empty()) {
            // no slices, just return the resized image
            return output;
        }

        // resize to refined size
        clip_image_u8_ptr refined_img(clip_image_u8_init());
        if (inst.padding_refined) {
            image_manipulation::resize_and_pad_image(*img, *refined_img, inst.refined_size);
        } else {
            image_manipulation::bilinear_resize(*img, *refined_img, inst.refined_size.width, inst.refined_size.height);
        }

        // create slices
        for (const auto & slice : inst.slices) {
            int x = slice.x;
            int y = slice.y;
            int w = slice.size.width;
            int h = slice.size.height;

            clip_image_u8_ptr img_slice(clip_image_u8_init());
            image_manipulation::crop_image(*refined_img, *img_slice, x, y, w, h);
            output.push_back(std::move(img_slice));
        }

        return output;
    }
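    // note on the output layout: output[0] is always the overview image; output[1..] are the
    // slices in the order produced by get_slice_instructions(), i.e. row-major over the grid
    // (y outer, x inner)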
private:
    static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
            float r = static_cast<float>(width) / height;
            height  = static_cast<int>(scale_resolution / std::sqrt(r));
            width   = static_cast<int>(height * r);
        }
        clip_image_size res;
        res.width  = ensure_divide(width,  patch_size);
        res.height = ensure_divide(height, patch_size);
        return res;
    }
    static clip_image_size resize_maintain_aspect_ratio(const clip_image_size & orig, const clip_image_size & target_max) {
        float scale_width  = static_cast<float>(target_max.width)  / orig.width;
        float scale_height = static_cast<float>(target_max.height) / orig.height;
        float scale = std::min(scale_width, scale_height);
        return clip_image_size{
            static_cast<int>(orig.width  * scale),
            static_cast<int>(orig.height * scale),
        };
    }
    /**
     * Selects the best resolution from a list of possible resolutions based on the original size.
     *
     * For example, when given a list of resolutions:
     *  - 100x100
     *  - 200x100
     *  - 100x200
     *  - 200x200
     *
     * And an input image of size 111x200, then 100x200 is the best fit (least wasted resolution).
     *
     * @param original_size The original size of the image
     * @param possible_resolutions A list of possible resolutions
     * @return The best fit resolution
     */
    static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
        clip_image_size best_fit;
        int min_wasted_area = std::numeric_limits<int>::max();
        int max_effective_resolution = 0;

        for (const clip_image_size & candidate : possible_resolutions) {
            auto target_size = resize_maintain_aspect_ratio(original_size, candidate);
            int effective_resolution = std::min(
                target_size.width * target_size.height,
                original_size.width * original_size.height);
            int wasted_area = (candidate.width * candidate.height) - effective_resolution;

            if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_area < min_wasted_area)) {
                max_effective_resolution = effective_resolution;
                min_wasted_area = wasted_area;
                best_fit = candidate;
            }

            LOG_DBG("%s: candidate: %d x %d, target: %d x %d, wasted: %d, effective: %d\n", __func__, candidate.width, candidate.height, target_size.width, target_size.height, wasted_area, effective_resolution);
        }

        return best_fit;
    }
    static int ensure_divide(int length, int patch_size) {
        return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
    }
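    // e.g. (illustrative): ensure_divide(449, 14) rounds 449/14 = 32.07 to 32, giving 448;
    // the result is never smaller than patch_size itself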
    static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        int grid_x = grid.width;
        int grid_y = grid.height;

        int refine_width  = ensure_divide(width,  grid_x);
        int refine_height = ensure_divide(height, grid_y);

        clip_image_size grid_size;
        grid_size.width  = refine_width  / grid_x;
        grid_size.height = refine_height / grid_y;

        auto best_grid_size  = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
        int best_grid_width  = best_grid_size.width;
        int best_grid_height = best_grid_size.height;

        clip_image_size refine_size;
        refine_size.width  = best_grid_width  * grid_x;
        refine_size.height = best_grid_height * grid_y;
        return refine_size;
    }
    static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
        std::vector<int> candidate_split_grids_nums;
        for (int i : {multiple - 1, multiple, multiple + 1}) {
            if (i == 1 || i > max_slice_nums) {
                continue;
            }
            candidate_split_grids_nums.push_back(i);
        }

        std::vector<clip_image_size> candidate_grids;
        for (int split_grids_nums : candidate_split_grids_nums) {
            int m = 1;
            while (m <= split_grids_nums) {
                if (split_grids_nums % m == 0) {
                    candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
                }
                ++m;
            }
        }

        clip_image_size best_grid{1, 1};
        float min_error = std::numeric_limits<float>::infinity();
        for (const auto & grid : candidate_grids) {
            float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
            if (error < min_error) {
                best_grid = grid;
                min_error = error;
            }
        }
        return best_grid;
    }
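    // e.g. (illustrative): for a 4:3 image, log_ratio = log(4/3) ~= 0.288; with multiple = 4
    // the candidate grids are 1x3, 3x1, 1x4, 2x2, 4x1, 1x5 and 5x1, and 2x2 wins because
    // |0.288 - log(2/2)| = 0.288 is the smallest error among the candidates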
};
// returns the normalized float tensor for llava-1.5; for spatial_unpad with anyres processing
// (llava-1.6), it returns the normalized image patch tensors as a vector
// res_imgs memory is allocated here; previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
    clip_image_size original_size{img->nx, img->ny};
    bool pad_to_square = true;
    auto & params = ctx->model.hparams;
    // the model config actually contains all we need to decide on how to preprocess;
    // here we automatically switch to the new llava-1.6 preprocessing
    if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) {
        pad_to_square = false;
    }
    if (clip_is_minicpmv(ctx)) {
        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

        for (size_t i = 0; i < imgs.size(); ++i) {
            // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
            res_imgs->entries.push_back(std::move(res));
        }

        res_imgs->grid_x = inst.grid_size.width;
        res_imgs->grid_y = inst.grid_size.height;
        return true;

    } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
        clip_image_u8 resized;
        auto patch_size = params.patch_size * 2;
        auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, patch_size, params.image_size);
        image_manipulation::bicubic_resize(*img, resized, new_size.width, new_size.height);

        clip_image_f32_ptr img_f32(clip_image_f32_init());
        // clip_image_f32_ptr res(clip_image_f32_init());
        normalize_image_u8_to_f32(resized, *img_f32, params.image_mean, params.image_std);
        // res_imgs->data[0] = *res;
        res_imgs->entries.push_back(std::move(img_f32));
        return true;
    } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3) {
        // The refined size has two steps:
        // 1. Resize w/ aspect-ratio preserving such that the longer side is
        //    the preprocessor longest size
        // 2. Resize w/out preserving aspect ratio such that both sides are
        //    multiples of image_size (always rounding up)
        //
        // CITE: https://github.com/huggingface/transformers/blob/main/src/transformers/models/idefics3/image_processing_idefics3.py#L737
        const clip_image_size refined_size = image_manipulation::calc_size_preserved_ratio(
            original_size, params.image_size, params.preproc_image_size);
        // LOG_INF("%s: original size: %d x %d, refined size: %d x %d\n",
        //         __func__, original_size.width, original_size.height,
        //         refined_size.width, refined_size.height);

        llava_uhd::slice_instructions instructions;
        instructions.overview_size = clip_image_size{params.image_size, params.image_size};
        instructions.refined_size  = refined_size;
        instructions.grid_size     = clip_image_size{
            static_cast<int>(std::ceil(static_cast<float>(refined_size.width)  / params.image_size)),
            static_cast<int>(std::ceil(static_cast<float>(refined_size.height) / params.image_size)),
        };

        for (int y = 0; y < refined_size.height; y += params.image_size) {
            for (int x = 0; x < refined_size.width; x += params.image_size) {
                // LOG_INF("%s: adding slice at x=%d, y=%d\n", __func__, x, y);
                instructions.slices.push_back(llava_uhd::slice_coordinates{
                    /* x    */ x,
                    /* y    */ y,
                    /* size */ clip_image_size{
                        std::min(params.image_size, refined_size.width  - x),
                        std::min(params.image_size, refined_size.height - y)
                    }
                });
            }
        }
        auto imgs = llava_uhd::slice_image(img, instructions);

        // cast and normalize to f32
        for (size_t i = 0; i < imgs.size(); ++i) {
            // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
            res_imgs->entries.push_back(std::move(res));
        }

        res_imgs->grid_x = instructions.grid_size.width;
        res_imgs->grid_y = instructions.grid_size.height;
        return true;
    } else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE
            || ctx->proj_type() == PROJECTOR_TYPE_GEMMA3
            || ctx->proj_type() == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution
            ) {
        clip_image_u8 resized_image;
        int sz = params.image_size;
        image_manipulation::resize_and_pad_image(*img, resized_image, {sz, sz});
        clip_image_f32_ptr img_f32(clip_image_f32_init());
        // clip_image_save_to_bmp(resized_image, "resized.bmp");
        normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
        res_imgs->entries.push_back(std::move(img_f32));
        return true;

    } else if (ctx->proj_type() == PROJECTOR_TYPE_PIXTRAL
            || ctx->proj_type() == PROJECTOR_TYPE_LIGHTONOCR
            ) {
        clip_image_u8 resized_image;
        auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, params.patch_size, params.image_size);
        image_manipulation::bilinear_resize(*img, resized_image, new_size.width, new_size.height);
        clip_image_f32_ptr img_f32(clip_image_f32_init());
        normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
        res_imgs->entries.push_back(std::move(img_f32));
        return true;
    } else if (ctx->proj_type() == PROJECTOR_TYPE_LLAMA4) {
        GGML_ASSERT(!params.image_res_candidates.empty());
        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

        for (size_t i = 0; i < imgs.size(); ++i) {
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
            res_imgs->entries.push_back(std::move(res));
        }

        res_imgs->grid_x = inst.grid_size.width;
        res_imgs->grid_y = inst.grid_size.height;
        return true;

    } else if (ctx->proj_type() == PROJECTOR_TYPE_LFM2
            || ctx->proj_type() == PROJECTOR_TYPE_KIMIVL
            ) {
        GGML_ASSERT(params.proj_scale_factor);

        // smart resize
        const int width  = img->nx;
        const int height = img->ny;
        const int total_factor = params.patch_size * params.proj_scale_factor;
        constexpr int min_image_tokens = 64;
        constexpr int max_image_tokens = 1024;
        const float min_pixels = min_image_tokens * total_factor * total_factor;
        const float max_pixels = max_image_tokens * total_factor * total_factor;

        auto round_by_factor = [f = total_factor](float x) { return static_cast<int>(std::nearbyintf(x / static_cast<float>(f))) * f; };
        auto ceil_by_factor  = [f = total_factor](float x) { return static_cast<int>(std::ceil(x / static_cast<float>(f))) * f; };
        auto floor_by_factor = [f = total_factor](float x) { return static_cast<int>(std::floor(x / static_cast<float>(f))) * f; };

        int h_bar = std::max(total_factor, round_by_factor(height));
        int w_bar = std::max(total_factor, round_by_factor(width));

        if (h_bar * w_bar > max_pixels) {
            const auto beta = std::sqrt((height * width) / max_pixels);
            h_bar = std::max(total_factor, floor_by_factor(height / beta));
            w_bar = std::max(total_factor, floor_by_factor(width  / beta));
        } else if (h_bar * w_bar < min_pixels) {
            const auto beta = std::sqrt(min_pixels / (height * width));
            h_bar = ceil_by_factor(height * beta);
            w_bar = ceil_by_factor(width  * beta);
        }

        const std::array<uint8_t, 3> pad_color = {122, 116, 104};

        clip_image_u8 resized_img;
        image_manipulation::resize_and_pad_image(*img, resized_img, clip_image_size{w_bar, h_bar}, pad_color);
        clip_image_f32_ptr res(clip_image_f32_init());
        normalize_image_u8_to_f32(resized_img, *res, params.image_mean, params.image_std);
        res_imgs->entries.push_back(std::move(res));
        return true;
    }
    // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
    // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156

    clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily

    if (pad_to_square) {
        // for llava-1.5, we resize the image to a square, and pad the shorter side with a background color
        // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
        const int longer_side = std::max(img->nx, img->ny);
        temp->nx = longer_side;
        temp->ny = longer_side;
        temp->buf.resize(3 * longer_side * longer_side);

        // background color in RGB from LLaVA (this is the mean rgb color * 255)
        const std::array<uint8_t, 3> pad_color = {122, 116, 104};

        // resize the image to the target_size
        image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);

        clip_image_f32_ptr res(clip_image_f32_init());
        normalize_image_u8_to_f32(*temp, *res, params.image_mean, params.image_std);
        res_imgs->entries.push_back(std::move(res));
        return true;

    } else if (!params.image_res_candidates.empty()) {
        // "spatial_unpad" with "anyres" processing for llava-1.6
        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

        for (size_t i = 0; i < imgs.size(); ++i) {
            // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
            clip_image_f32_ptr res(clip_image_f32_init());
            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
            res_imgs->entries.push_back(std::move(res));
        }

        return true;

    } else {
        GGML_ABORT("Unknown image preprocessing type");
    }
}
ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
    return ctx->model.image_newline;
}

void clip_free(clip_ctx * ctx) {
    if (ctx == nullptr) {
        return;
    }
    delete ctx;
}

// deprecated
size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
    const int32_t nx = ctx->model.hparams.image_size;
    const int32_t ny = ctx->model.hparams.image_size;
    return clip_embd_nbytes_by_img(ctx, nx, ny);
}

size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h) {
    clip_image_f32 img;
    img.nx = img_w;
    img.ny = img_h;
    return clip_n_output_tokens(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
}
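// Sizing example (illustrative numbers, not from a real model): if the
// projector emits 576 tokens of width 4096 for a given image, this returns
// 576 * 4096 * sizeof(float) = 9437184 bytes for the caller's output buffer.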
int32_t clip_get_image_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.image_size;
}

int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.patch_size;
}

int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.n_embd;
}

const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
    return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
}
int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;
    const int n_total = clip_n_output_tokens(ctx, img);
    if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
        return img->nx / (params.patch_size * 2) + (int)(img->nx % params.patch_size > 0);
    }
    return n_total;
}

int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;
    if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
        return img->ny / (params.patch_size * 2) + (int)(img->ny % params.patch_size > 0);
    }
    return 1;
}
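// Worked example (illustrative): for Qwen2-VL with patch_size = 14, a 448x448
// image gives 448 / (14*2) = 16 tokens along each axis, i.e. 16*16 = 256 in
// total, consistent with the QWEN2VL case of clip_n_output_tokens() below.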
int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;

    // for models with a fixed image size, the input image is already pre-processed and resized to a square
    int patch_size = params.patch_size;
    int n_patches  = (img->nx / patch_size) * (img->ny / patch_size);

    projector_type proj = ctx->proj_type();

    switch (proj) {
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                n_patches /= 4;
                if (ctx->model.mm_boi) {
                    n_patches += 2; // for BOI and EOI token embeddings
                }
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                // use the actual config value if available, otherwise fall back to hardcoded values
                if (params.minicpmv_query_num > 0) {
                    n_patches = params.minicpmv_query_num;
                } else {
                    // fall back to hardcoded values for legacy models
                    if (params.minicpmv_version == 2) {
                        n_patches = 96;
                    } else if (params.minicpmv_version == 3) {
                        n_patches = 64;
                    } else if (params.minicpmv_version == 4) {
                        n_patches = 64;
                    } else if (params.minicpmv_version == 5) {
                        // MiniCPM-V 4.0
                        n_patches = 64;
                    } else if (params.minicpmv_version == 6) {
                        // MiniCPM-V 4.5
                        n_patches = 64;
                    } else {
                        GGML_ABORT("Unknown minicpmv version");
                    }
                }
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            {
                // dynamic size (two conv ops, so the effective patch size is doubled)
                int patch_size = params.patch_size * 2;
                int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
                int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
                n_patches = x_patch * y_patch;
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_LLAMA4:
            {
                // both X and Y are downscaled by the scale factor
                int scale_factor = ctx->model.hparams.proj_scale_factor;
                n_patches /= (scale_factor * scale_factor);
            } break;
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            {
                // dynamic size
                int scale_factor = ctx->model.hparams.proj_scale_factor;
                int out_patch_size = params.patch_size * scale_factor;
                int x_patch = CLIP_ALIGN(img->nx, out_patch_size) / out_patch_size;
                int y_patch = CLIP_ALIGN(img->ny, out_patch_size) / out_patch_size;
                n_patches = x_patch * y_patch;
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                // dynamic size
                int n_merge = params.spatial_merge_size;
                int n_patches_x = img->nx / patch_size / (n_merge > 0 ? n_merge : 1);
                int n_patches_y = img->ny / patch_size / (n_merge > 0 ? n_merge : 1);
                if (ctx->model.token_embd_img_break) {
                    n_patches = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
                } else {
                    n_patches = n_patches_y * n_patches_x;
                }
            } break;
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_QWEN2A:
            {
                n_patches = img->nx;

                const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
                if (ctx->model.audio_has_stack_frames()) {
                    GGML_ASSERT(proj_stack_factor > 0);
                    const int n_len = CLIP_ALIGN(n_patches, proj_stack_factor);
                    n_patches = n_len / proj_stack_factor;
                }

                // whisper downscales the input tokens by half after conv1d
                n_patches /= 2;

                if (ctx->model.audio_has_avgpool()) {
                    // divide by 2 because of nn.AvgPool1d(2, stride=2)
                    n_patches /= 2;
                }
            } break;
        case PROJECTOR_TYPE_COGVLM:
            {
                n_patches += 2; // for BOI and EOI token embeddings
            } break;
        default:
            GGML_ABORT("unsupported projector type");
    }

    return n_patches;
}
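// Worked example (illustrative values): for the PIXTRAL path with
// patch_size = 16 and spatial_merge_size = 2, a 512x512 image gives
// n_patches_x = n_patches_y = 512/16/2 = 16; with an [IMG_BREAK] token after
// every row but the last, the total is 16*16 + 15 = 271 output tokens.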
static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
    assert(embed_dim % 2 == 0);
    int H = pos.size();
    int W = pos[0].size();

    std::vector<float> omega(embed_dim / 2);
    for (int i = 0; i < embed_dim / 2; ++i) {
        omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
    }

    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                float out_value = pos[h][w] * omega[d];
                emb[h][w][d]                 = sin(out_value);
                emb[h][w][d + embed_dim / 2] = cos(out_value);
            }
        }
    }

    return emb;
}
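// In closed form: with D = embed_dim and omega_i = 1 / 10000^(i / (D/2)) for
// i in [0, D/2), a position p maps to
//   [sin(p*omega_0), ..., sin(p*omega_{D/2-1}), cos(p*omega_0), ..., cos(p*omega_{D/2-1})]
// i.e. the standard fixed sinusoidal position embedding, applied per grid axis.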
static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
    assert(embed_dim % 2 == 0);

    std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
    std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)

    int H = emb_h.size();
    int W = emb_h[0].size();

    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                emb[h][w][d]                 = emb_h[h][w][d];
                emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
            }
        }
    }
    return emb;
}
static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
    int grid_h_size = image_size.first;
    int grid_w_size = image_size.second;

    std::vector<float> grid_h(grid_h_size);
    std::vector<float> grid_w(grid_w_size);

    for (int i = 0; i < grid_h_size; ++i) {
        grid_h[i] = static_cast<float>(i);
    }
    for (int i = 0; i < grid_w_size; ++i) {
        grid_w[i] = static_cast<float>(i);
    }

    std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid[h][w] = grid_w[w];
        }
    }

    std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid_2d[0][h][w] = grid_h[h];
            grid_2d[1][h][w] = grid_w[w];
        }
    }

    std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);

    int H = image_size.first;
    int W = image_size.second;
    std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
        }
    }

    return pos_embed_2d;
}
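// Layout note (derived from the flattening loop above): for image_size = {H, W}
// the result has H*W rows of embed_dim floats, flattened column-major -- row
// w*H + h holds the embedding of grid cell (h, w). E.g. for H = 2, W = 3, the
// rows appear in the order (0,0), (1,0), (0,1), (1,1), (0,2), (1,2).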
bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    clip_image_f32_batch imgs;
    clip_image_f32_ptr img_copy(clip_image_f32_init());
    *img_copy = *img;
    imgs.entries.push_back(std::move(img_copy));

    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}
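// Usage sketch (hypothetical caller, not part of this file): `ctx` is assumed
// to be a loaded clip_ctx and `img` a preprocessed clip_image_f32.
//
//   std::vector<float> embd(clip_embd_nbytes_by_img(ctx, img->nx, img->ny) / sizeof(float));
//   if (!clip_image_encode(ctx, /*n_threads=*/4, img, embd.data())) {
//       LOG_ERR("image encoding failed\n");
//   }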
bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
    const clip_image_f32_batch & imgs = *imgs_c_ptr;
    int batch_size = imgs.entries.size();

    // TODO @ngxson : implement batch size > 1 as a loop
    // we don't need true batching support because the cgraph is going to be big anyway
    if (batch_size != 1) {
        return false; // only support batch size of 1
    }

    // build the inference graph
    ctx->debug_print_tensors.clear();
    ggml_backend_sched_reset(ctx->sched.get());
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
    ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);

    // set inputs
    const auto & model   = ctx->model;
    const auto & hparams = model.hparams;

    const int image_size_width  = imgs.entries[0]->nx;
    const int image_size_height = imgs.entries[0]->ny;

    const int patch_size  = hparams.patch_size;
    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int n_pos = num_patches + (model.class_embedding ? 1 : 0);
    const int pos_w = image_size_width  / patch_size;
    const int pos_h = image_size_height / patch_size;

    const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl

    auto get_inp_tensor = [&gf](const char * name) {
        ggml_tensor * inp = ggml_graph_get_tensor(gf, name);
        if (inp == nullptr) {
            GGML_ABORT("Failed to get tensor %s", name);
        }
        if (!(inp->flags & GGML_TENSOR_FLAG_INPUT)) {
            GGML_ABORT("Tensor %s is not an input tensor", name);
        }
        return inp;
    };

    auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_F32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_I32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };
    // set input pixel values
    if (!imgs.is_audio) {
        size_t nelem = 0;
        for (const auto & img : imgs.entries) {
            nelem += img->nx * img->ny * 3;
        }
        std::vector<float> inp_raw(nelem);

        // layout of data (note: the channel dim is unrolled to better visualize the layout):
        //
        // ┌──W──┐
        // │  H  │ │  channel = R
        // ├─────┤ │
        // │  H  │ │  channel = G
        // ├─────┤ │
        // │  H  │ │  channel = B
        // └─────┘ │
        //  ───────┘ x B
        for (size_t i = 0; i < imgs.entries.size(); i++) {
            const int nx = imgs.entries[i]->nx;
            const int ny = imgs.entries[i]->ny;
            const int n  = nx * ny;

            for (int b = 0; b < batch_size; b++) {
                float * batch_entry = inp_raw.data() + b * (3*n);
                for (int y = 0; y < ny; y++) {
                    for (int x = 0; x < nx; x++) {
                        size_t base_src = 3*(y * nx + x); // idx of the first channel in the interleaved RGB source
                        size_t base_dst =    y * nx + x;  // idx within one channel plane of the planar destination
                        batch_entry[      base_dst] = imgs.entries[b]->buf[base_src    ];
                        batch_entry[1*n + base_dst] = imgs.entries[b]->buf[base_src + 1];
                        batch_entry[2*n + base_dst] = imgs.entries[b]->buf[base_src + 2];
                    }
                }
            }
        }
        set_input_f32("inp_raw", inp_raw);

    } else {
        // audio input
        GGML_ASSERT(imgs.entries.size() == 1);
        const auto & mel_inp = imgs.entries[0];
        const int n_step = mel_inp->nx;
        const int n_mel  = mel_inp->ny;
        std::vector<float> inp_raw(n_step * n_mel);
        std::memcpy(inp_raw.data(), mel_inp->buf.data(), n_step * n_mel * sizeof(float));
        set_input_f32("inp_raw", inp_raw);
    }
    // set input per projector
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                // inspired from siglip:
                //   -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
                //   -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
                std::vector<int32_t> positions(pos_h * pos_w);
                int bucket_coords_h[1024];
                int bucket_coords_w[1024];
                for (int i = 0; i < pos_h; i++){
                    bucket_coords_h[i] = std::floor(70.0*i/pos_h);
                }
                for (int i = 0; i < pos_w; i++){
                    bucket_coords_w[i] = std::floor(70.0*i/pos_w);
                }
                for (int i = 0, id = 0; i < pos_h; i++){
                    for (int j = 0; j < pos_w; j++){
                        positions[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
                    }
                }
                set_input_i32("positions", positions);

                // inspired from resampler of Qwen-VL:
                //   -> https://huggingface.co/Qwen/Qwen-VL/tree/main
                //   -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
                int embed_dim = clip_n_mmproj_embd(ctx);

                // TODO @ngxson : this is very inefficient, can we do this using ggml_sin and ggml_cos?
                auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));

                std::vector<float> pos_embed(embed_dim * pos_w * pos_h);
                for (int i = 0; i < pos_w * pos_h; ++i) {
                    for (int j = 0; j < embed_dim; ++j) {
                        pos_embed[i * embed_dim + j] = pos_embed_t[i][j];
                    }
                }

                set_input_f32("pos_embed", pos_embed);
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
            {
                const int merge_ratio = 2;
                const int pw = image_size_width  / patch_size;
                const int ph = image_size_height / patch_size;
                std::vector<int> positions(n_pos * 4);
                int ptr = 0;
                for (int y = 0; y < ph; y += merge_ratio) {
                    for (int x = 0; x < pw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                positions[                  ptr] = y + dy;
                                positions[    num_patches + ptr] = x + dx;
                                positions[2 * num_patches + ptr] = y + dy;
                                positions[3 * num_patches + ptr] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_QWEN25VL:
            {
                // pw * ph   = number of tokens output by the ViT after the patch merger is applied
                // ipw * iph = number of vision tokens processed inside the ViT
                const int merge_ratio = 2;
                const int pw  = image_size_width  / patch_size / merge_ratio;
                const int ph  = image_size_height / patch_size / merge_ratio;
                const int ipw = image_size_width  / patch_size;
                const int iph = image_size_height / patch_size;

                std::vector<int> idx    (ph * pw);
                std::vector<int> inv_idx(ph * pw);

                if (use_window_attn) {
                    const int attn_window_size = 112;
                    const int grid_window = attn_window_size / patch_size / merge_ratio;
                    int dst = 0;
                    // [num_vision_tokens, num_vision_tokens] attention mask tensor
                    std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
                    int mask_row = 0;

                    for (int y = 0; y < ph; y += grid_window) {
                        for (int x = 0; x < pw; x += grid_window) {
                            const int win_h = std::min(grid_window, ph - y);
                            const int win_w = std::min(grid_window, pw - x);
                            const int dst_0 = dst;
                            // group all tokens belonging to the same window together (into a contiguous range)
                            for (int dy = 0; dy < win_h; dy++) {
                                for (int dx = 0; dx < win_w; dx++) {
                                    const int src = (y + dy) * pw + (x + dx);
                                    GGML_ASSERT(src < (int)idx.size());
                                    GGML_ASSERT(dst < (int)inv_idx.size());
                                    idx    [src] = dst;
                                    inv_idx[dst] = src;
                                    dst++;
                                }
                            }

                            for (int r = 0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
                                int row_offset = mask_row * (ipw * iph);
                                std::fill(
                                    mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
                                    mask.begin() + row_offset + (dst   * merge_ratio * merge_ratio),
                                    0.0);
                                mask_row++;
                            }
                        }
                    }

                    set_input_i32("window_idx",     idx);
                    set_input_i32("inv_window_idx", inv_idx);
                    set_input_f32("window_mask",    mask);
                } else {
                    for (int i = 0; i < ph * pw; i++) {
                        idx[i] = i;
                    }
                }

                const int mpow = merge_ratio * merge_ratio;
                std::vector<int> positions(n_pos * 4);

                int ptr = 0;
                for (int y = 0; y < iph; y += merge_ratio) {
                    for (int x = 0; x < ipw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                auto remap = idx[ptr / mpow];
                                remap = (remap * mpow) + (ptr % mpow);

                                positions[                  remap] = y + dy;
                                positions[    num_patches + remap] = x + dx;
                                positions[2 * num_patches + remap] = y + dy;
                                positions[3 * num_patches + remap] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }

                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_KIMIVL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i / n_patches_per_col;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i % n_patches_per_col;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);

                // The patches vector is used to get rows to index into the embeds with;
                // we should skip dim 0 only if we have CLS to avoid going out of bounds
                // when retrieving the rows.
                int patch_offset = model.class_embedding ? 1 : 0;
                std::vector<int32_t> patches(num_patches);
                for (int i = 0; i < num_patches; i++) {
                    patches[i] = i + patch_offset;
                }
                set_input_i32("patches", patches);
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_QWEN2A:
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_COGVLM:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(num_patches + 1, 0); // +1 for the [CLS] token
                // last pos is always kept 0, it's for CLS
                // dimension H
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i / n_patches_per_col) + 1;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i % n_patches_per_col) + 1;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        default:
            GGML_ABORT("Unknown projector type");
    }
    // ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
    ggml_backend_dev_t dev = ggml_backend_get_device(ctx->backend_cpu);
    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
    if (reg) {
        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (ggml_backend_set_n_threads_fn) {
            ggml_backend_set_n_threads_fn(ctx->backend_cpu, n_threads);
        }
    }

    auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
        return false;
    }

    // print debug nodes
    if (ctx->debug_graph) {
        LOG_INF("\n\n---\n\n");
        LOG_INF("\n\nDebug graph:\n\n");
        for (ggml_tensor * t : ctx->debug_print_tensors) {
            std::vector<uint8_t> data(ggml_nbytes(t));
            ggml_backend_tensor_get(t, data.data(), 0, ggml_nbytes(t));
            print_tensor_shape(t);
            print_tensor_data(t, data.data(), 3);
        }
    }

    // the last node is the embedding tensor
    ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // sanity check (only support batch size of 1 for now)
    const int n_tokens_out          = embeddings->ne[1];
    const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
    if (n_tokens_out != expected_n_tokens_out) {
        LOG_ERR("%s: expected output %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
        GGML_ABORT("Invalid number of output tokens");
    }

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}
int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_LDP:
            return ctx->model.mm_model_block_1_block_2_1_b->ne[0];
        case PROJECTOR_TYPE_LDPV2:
            return ctx->model.mm_model_peg_0_b->ne[0];
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_MLP_NORM:
            return ctx->model.mm_3_b->ne[0];
        case PROJECTOR_TYPE_MINICPMV:
            return ctx->model.mm_model_proj->ne[0];
        case PROJECTOR_TYPE_GLM_EDGE:
            return ctx->model.mm_model_mlp_3_w->ne[1];
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            return ctx->model.mm_1_b->ne[0];
        case PROJECTOR_TYPE_GEMMA3:
            return ctx->model.mm_input_proj_w->ne[0];
        case PROJECTOR_TYPE_IDEFICS3:
            return ctx->model.projection->ne[1];
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_VOXTRAL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_INTERNVL:
            return ctx->model.mm_3_w->ne[1];
        case PROJECTOR_TYPE_LLAMA4:
            return ctx->model.mm_model_proj->ne[1];
        case PROJECTOR_TYPE_QWEN2A:
            return ctx->model.mm_fc_w->ne[1];
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_COGVLM:
            return ctx->model.mm_4h_to_h_w->ne[1];
        default:
            GGML_ABORT("Unknown projector type");
    }
}
int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) {
        return ctx->model.hparams.minicpmv_version;
    }
    return 0;
}

bool clip_is_glm(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE;
}

bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL;
}

bool clip_is_llava(const struct clip_ctx * ctx) {
    return ctx->model.hparams.has_llava_projector;
}

bool clip_is_gemma3(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GEMMA3;
}

bool clip_has_vision_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_VISION;
}

bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_AUDIO;
}

bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A
        || ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL;
}
bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
    clip_image_f32 clip_img;
    clip_img.buf.resize(h * w * 3);

    for (int i = 0; i < h * w * 3; i++) {
        clip_img.buf[i] = img[i];
    }

    clip_img.nx = w;
    clip_img.ny = h;

    // propagate encoding failures to the caller instead of unconditionally returning true
    return clip_image_encode(ctx, n_threads, &clip_img, vec);
}
//
// API used internally with mtmd
//

projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
    return ctx->proj_type();
}

void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
    clip_image_f32 * audio = new clip_image_f32;
    audio->nx = n_frames;
    audio->ny = n_mel;
    audio->buf.resize(n_frames * n_mel);
    std::memcpy(audio->buf.data(), mel, n_frames * n_mel * sizeof(float));

    batch->entries.push_back(clip_image_f32_ptr(audio));
    batch->is_audio = true;
}
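// Usage sketch (hypothetical caller, illustrative values): append a mel
// spectrogram to a batch before encoding. `ctx`, `mel_data` and `embd` are
// assumptions, not names from this file; `mel_data` must hold
// n_frames * n_mel floats, matching the memcpy above.
//
//   clip_image_f32_batch batch;
//   clip_image_f32_batch_add_mel(&batch, /*n_mel=*/128, /*n_frames=*/3000, mel_data);
//   clip_image_batch_encode(ctx, /*n_threads=*/4, &batch, embd.data());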