// NOTE: This is modified from clip.cpp only for LLaVA,
// so there might still be unnecessary artifacts hanging around
// I'll gradually clean and extend it
// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) there is a significant difference in resulting embeddings compared to PyTorch
#include "clip.h"
#include "clip-impl.h"
#include "ggml.h"
#include "ggml-cpp.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "gguf.h"

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <stdexcept>
#include <unordered_set>
#include <vector>
#include <cinttypes>
#include <limits>
#include <array>
#include <functional>

struct clip_logger_state g_logger_state = {clip_log_callback_default, NULL};

enum ffn_op_type {
    FFN_GELU,
    FFN_GELU_ERF,
    FFN_SILU,
    FFN_GELU_QUICK,
};

enum norm_type {
    NORM_TYPE_NORMAL,
    NORM_TYPE_RMS,
};

//#define CLIP_DEBUG_FUNCTIONS

#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    // Write pixel data
    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
    }

    file.close();
}
static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;

    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',    // Signature
        0,0,0,0,    // Image file size in bytes
        0,0,0,0,    // Reserved
        54,0,0,0    // Start of pixel array
    };

    // Total file size
    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0,   // Size of this header (40 bytes)
        0,0,0,0,    // Image width
        0,0,0,0,    // Image height
        1,0,        // Number of color planes
        24,0,       // Bits per pixel
        0,0,0,0,    // No compression
        0,0,0,0,    // Image size (can be 0 for no compression)
        0,0,0,0,    // X pixels per meter (not specified)
        0,0,0,0,    // Y pixels per meter (not specified)
        0,0,0,0,    // Total colors (color table not used)
        0,0,0,0     // Important colors (all are important)
    };

    // Width and height in the information header
    infoHeader[4] = (unsigned char)(img.nx);
    infoHeader[5] = (unsigned char)(img.nx >> 8);
    infoHeader[6] = (unsigned char)(img.nx >> 16);
    infoHeader[7] = (unsigned char)(img.nx >> 24);
    infoHeader[8] = (unsigned char)(img.ny);
    infoHeader[9] = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));

    // Pixel data
    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            // Each pixel
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
                img.buf[pixelIndex]
            };
            file.write(reinterpret_cast<char*>(pixel), 3);
        }
        // Write padding for the row
        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
    }

    file.close();
}
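
// [illustrative sketch, not part of the build] The BMP header fields above are
// packed little-endian one byte at a time via manual shifts. The same pattern,
// factored into a helper for clarity (write_le32 is a hypothetical name, not
// an existing function in this file):
#if 0
static void write_le32(std::ofstream & os, uint32_t v) {
    // emit the 4 bytes of v from least to most significant, matching the
    // manual shifts used for fileHeader[2..5] and infoHeader[4..11] above
    const unsigned char bytes[4] = {
        (unsigned char)(v),
        (unsigned char)(v >> 8),
        (unsigned char)(v >> 16),
        (unsigned char)(v >> 24),
    };
    os.write(reinterpret_cast<const char *>(bytes), 4);
}
#endif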
// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(3 * src.nx * src.ny);
    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
    }
}
#endif
//
// clip layers
//

enum patch_merge_type {
    PATCH_MERGE_FLAT,
    PATCH_MERGE_SPATIAL_UNPAD,
};

struct clip_hparams {
    int32_t image_size = 0;
    int32_t patch_size = 0;
    int32_t n_embd = 0;
    int32_t n_ff = 0;
    int32_t projection_dim = 0;
    int32_t n_head = 0;
    int32_t n_layer = 0;

    // idefics3
    int32_t image_longest_edge = 0;
    int32_t image_min_pixels = -1;
    int32_t image_max_pixels = -1;

    int32_t n_merge = 0; // number of patch merges **per-side**

    float image_mean[3];
    float image_std[3];

    // for models using dynamic image size, we need a smaller image size for warmup;
    // otherwise, users will get OOM every time they load the model
    int32_t warmup_image_size = 0;
    int32_t warmup_audio_size = 3000;

    ffn_op_type ffn_op = FFN_GELU;

    patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;

    float eps = 1e-6;
    float rope_theta = 0.0;

    std::vector<clip_image_size> image_res_candidates; // for llava-uhd style models
    int32_t image_crop_resolution;
    std::unordered_set<int32_t> vision_feature_layer;
    int32_t attn_window_size = 0;
    int32_t n_wa_pattern = 0;

    // audio
    int32_t n_mel_bins = 0;        // whisper preprocessor
    int32_t proj_stack_factor = 0; // ultravox

    // legacy
    bool has_llava_projector = false;
    int minicpmv_version = 0;
    int32_t minicpmv_query_num = 0; // MiniCPM-V query number

    // custom value provided by user, can be undefined if not set
    int32_t custom_image_min_tokens = -1;
    int32_t custom_image_max_tokens = -1;

    void set_limit_image_tokens(int n_tokens_min, int n_tokens_max) {
        const int cur_merge = n_merge == 0 ? 1 : n_merge;
        const int patch_area = patch_size * patch_size * cur_merge * cur_merge;
        image_min_pixels = (custom_image_min_tokens > 0 ? custom_image_min_tokens : n_tokens_min) * patch_area;
        image_max_pixels = (custom_image_max_tokens > 0 ? custom_image_max_tokens : n_tokens_max) * patch_area;
        warmup_image_size = static_cast<int>(std::sqrt(image_max_pixels));
    }

    void set_warmup_n_tokens(int n_tokens) {
        int n_tok_per_side = static_cast<int>(std::sqrt(n_tokens));
        GGML_ASSERT(n_tok_per_side * n_tok_per_side == n_tokens && "n_tokens must be n*n");
        const int cur_merge = n_merge == 0 ? 1 : n_merge;
        warmup_image_size = n_tok_per_side * patch_size * cur_merge;
        // TODO: support warmup size for custom token numbers
    }
};
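
// [illustrative worked example] How set_limit_image_tokens() turns a token
// budget into a pixel budget: with a hypothetical patch_size = 14 and
// n_merge = 2, one output token covers a (14*2)x(14*2) = 784-pixel area, so a
// budget of 256 tokens corresponds to 256 * 784 = 200704 pixels (a ~448x448
// image), and warmup_image_size = sqrt(200704) = 448. Actual values come from
// the model's GGUF metadata.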
struct clip_layer {
    // attention
    ggml_tensor * k_w = nullptr;
    ggml_tensor * k_b = nullptr;
    ggml_tensor * q_w = nullptr;
    ggml_tensor * q_b = nullptr;
    ggml_tensor * v_w = nullptr;
    ggml_tensor * v_b = nullptr;

    ggml_tensor * qkv_w = nullptr;
    ggml_tensor * qkv_b = nullptr;

    ggml_tensor * o_w = nullptr;
    ggml_tensor * o_b = nullptr;

    ggml_tensor * k_norm = nullptr;
    ggml_tensor * q_norm = nullptr;

    // layernorm 1
    ggml_tensor * ln_1_w = nullptr;
    ggml_tensor * ln_1_b = nullptr;

    ggml_tensor * ff_up_w = nullptr;
    ggml_tensor * ff_up_b = nullptr;
    ggml_tensor * ff_gate_w = nullptr;
    ggml_tensor * ff_gate_b = nullptr;
    ggml_tensor * ff_down_w = nullptr;
    ggml_tensor * ff_down_b = nullptr;

    // layernorm 2
    ggml_tensor * ln_2_w = nullptr;
    ggml_tensor * ln_2_b = nullptr;

    // layer scale (no bias)
    ggml_tensor * ls_1_w = nullptr;
    ggml_tensor * ls_2_w = nullptr;

    // qwen3vl deepstack merger
    ggml_tensor * deepstack_norm_w = nullptr;
    ggml_tensor * deepstack_norm_b = nullptr;
    ggml_tensor * deepstack_fc1_w = nullptr;
    ggml_tensor * deepstack_fc1_b = nullptr;
    ggml_tensor * deepstack_fc2_w = nullptr;
    ggml_tensor * deepstack_fc2_b = nullptr;

    bool has_deepstack() const {
        return deepstack_fc1_w != nullptr;
    }
};
struct clip_model {
    clip_modality modality = CLIP_MODALITY_VISION;
    projector_type proj_type = PROJECTOR_TYPE_MLP;
    clip_hparams hparams;

    // embeddings
    ggml_tensor * class_embedding = nullptr;
    ggml_tensor * patch_embeddings_0 = nullptr;
    ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along temporal dimension (Qwen2VL)
    ggml_tensor * patch_bias = nullptr;
    ggml_tensor * position_embeddings = nullptr;

    ggml_tensor * pre_ln_w = nullptr;
    ggml_tensor * pre_ln_b = nullptr;

    std::vector<clip_layer> layers;
    int32_t n_deepstack_layers = 0; // used by Qwen3-VL, calculated from clip_layer

    ggml_tensor * post_ln_w;
    ggml_tensor * post_ln_b;

    ggml_tensor * projection; // TODO: rename it to fc (fully connected layer)
    ggml_tensor * mm_fc_w;
    ggml_tensor * mm_fc_b;

    // LLaVA projection
    ggml_tensor * mm_input_norm_w = nullptr;
    ggml_tensor * mm_input_norm_b = nullptr;
    ggml_tensor * mm_0_w = nullptr;
    ggml_tensor * mm_0_b = nullptr;
    ggml_tensor * mm_2_w = nullptr;
    ggml_tensor * mm_2_b = nullptr;

    ggml_tensor * image_newline = nullptr;

    // Yi type models with mlp+normalization projection
    ggml_tensor * mm_1_w = nullptr; // Yi type models have 0, 1, 3, 4
    ggml_tensor * mm_1_b = nullptr;
    ggml_tensor * mm_3_w = nullptr;
    ggml_tensor * mm_3_b = nullptr;
    ggml_tensor * mm_4_w = nullptr;
    ggml_tensor * mm_4_b = nullptr;

    // GLMV-Edge projection
    ggml_tensor * mm_model_adapter_conv_w = nullptr;
    ggml_tensor * mm_model_adapter_conv_b = nullptr;

    // MobileVLM projection
    ggml_tensor * mm_model_mlp_1_w = nullptr;
    ggml_tensor * mm_model_mlp_1_b = nullptr;
    ggml_tensor * mm_model_mlp_3_w = nullptr;
    ggml_tensor * mm_model_mlp_3_b = nullptr;
    ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
    ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
    ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
    ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;

    // MobileVLM_V2 projection
    ggml_tensor * mm_model_mlp_0_w = nullptr;
    ggml_tensor * mm_model_mlp_0_b = nullptr;
    ggml_tensor * mm_model_mlp_2_w = nullptr;
    ggml_tensor * mm_model_mlp_2_b = nullptr;
    ggml_tensor * mm_model_peg_0_w = nullptr;
    ggml_tensor * mm_model_peg_0_b = nullptr;

    // MINICPMV projection
    ggml_tensor * mm_model_pos_embed_k = nullptr;
    ggml_tensor * mm_model_query = nullptr;
    ggml_tensor * mm_model_proj = nullptr;
    ggml_tensor * mm_model_kv_proj = nullptr;
    ggml_tensor * mm_model_attn_q_w = nullptr;
    ggml_tensor * mm_model_attn_q_b = nullptr;
    ggml_tensor * mm_model_attn_k_w = nullptr;
    ggml_tensor * mm_model_attn_k_b = nullptr;
    ggml_tensor * mm_model_attn_v_w = nullptr;
    ggml_tensor * mm_model_attn_v_b = nullptr;
    ggml_tensor * mm_model_attn_o_w = nullptr;
    ggml_tensor * mm_model_attn_o_b = nullptr;
    ggml_tensor * mm_model_ln_q_w = nullptr;
    ggml_tensor * mm_model_ln_q_b = nullptr;
    ggml_tensor * mm_model_ln_kv_w = nullptr;
    ggml_tensor * mm_model_ln_kv_b = nullptr;
    ggml_tensor * mm_model_ln_post_w = nullptr;
    ggml_tensor * mm_model_ln_post_b = nullptr;

    // gemma3
    ggml_tensor * mm_input_proj_w = nullptr;
    ggml_tensor * mm_soft_emb_norm_w = nullptr;

    // pixtral
    ggml_tensor * token_embd_img_break = nullptr;
    ggml_tensor * mm_patch_merger_w = nullptr;

    // ultravox / whisper encoder
    ggml_tensor * conv1d_1_w = nullptr;
    ggml_tensor * conv1d_1_b = nullptr;
    ggml_tensor * conv1d_2_w = nullptr;
    ggml_tensor * conv1d_2_b = nullptr;
    ggml_tensor * mm_norm_pre_w = nullptr;
    ggml_tensor * mm_norm_mid_w = nullptr;

    // cogvlm
    ggml_tensor * mm_post_fc_norm_w = nullptr;
    ggml_tensor * mm_post_fc_norm_b = nullptr;
    ggml_tensor * mm_h_to_4h_w = nullptr;
    ggml_tensor * mm_gate_w = nullptr;
    ggml_tensor * mm_4h_to_h_w = nullptr;
    ggml_tensor * mm_boi = nullptr;
    ggml_tensor * mm_eoi = nullptr;

    bool audio_has_avgpool() const {
        return proj_type == PROJECTOR_TYPE_QWEN2A
            || proj_type == PROJECTOR_TYPE_VOXTRAL;
    }

    bool audio_has_stack_frames() const {
        return proj_type == PROJECTOR_TYPE_ULTRAVOX
            || proj_type == PROJECTOR_TYPE_VOXTRAL;
    }
};
struct clip_ctx {
    clip_model model;

    gguf_context_ptr ctx_gguf;
    ggml_context_ptr ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    std::vector<ggml_backend_t> backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    ggml_backend_t backend     = nullptr;
    ggml_backend_t backend_cpu = nullptr;
    ggml_backend_buffer_ptr buf;

    int max_nodes = 8192;
    ggml_backend_sched_ptr sched;
    clip_flash_attn_type flash_attn_type = CLIP_FLASH_ATTN_TYPE_AUTO;

    bool is_allocated = false;

    // for debugging
    bool debug_graph = false;
    std::vector<ggml_tensor *> debug_print_tensors;

    clip_ctx(clip_context_params & ctx_params) {
        flash_attn_type = ctx_params.flash_attn_type;
        debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr;
        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (!backend_cpu) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        if (ctx_params.use_gpu) {
            auto backend_name = std::getenv("MTMD_BACKEND_DEVICE");
            if (backend_name != nullptr) {
                backend = ggml_backend_init_by_name(backend_name, nullptr);
                if (!backend) {
                    LOG_WRN("%s: Warning: Failed to initialize \"%s\" backend, falling back to default GPU backend\n", __func__, backend_name);
                }
            }
            if (!backend) {
                backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
                backend = backend ? backend : ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr);
            }
        }

        if (backend) {
            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
            backend_ptrs.push_back(backend);
            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
        } else {
            backend = backend_cpu;
            LOG_INF("%s: CLIP using CPU backend\n", __func__);
        }

        if (ctx_params.image_min_tokens > 0) {
            model.hparams.custom_image_min_tokens = ctx_params.image_min_tokens;
        }
        if (ctx_params.image_max_tokens > 0) {
            model.hparams.custom_image_max_tokens = ctx_params.image_max_tokens;
        }

        backend_ptrs.push_back(backend_cpu);
        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));

        sched.reset(
            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
        );
    }

    ~clip_ctx() {
        ggml_backend_free(backend);
        if (backend != backend_cpu) {
            ggml_backend_free(backend_cpu);
        }
    }

    // this function is added so that we don't change too much of the existing code
    projector_type proj_type() const {
        return model.proj_type;
    }
};
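
// [usage note, assumed invocation] The constructor above lets the backend be
// steered from the environment without code changes; the device name is
// whatever ggml_backend_init_by_name() accepts, e.g. (hypothetical device and
// command line):
//
//   MTMD_BACKEND_DEVICE=Vulkan0 ./llama-mtmd-cli -m model.gguf --mmproj mmproj.gguf ...
//
// If the named backend fails to initialize, a warning is logged and the code
// falls back to the default GPU, then iGPU, then CPU backend.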
struct clip_graph {
    clip_ctx * ctx;
    const clip_model & model;
    const clip_hparams & hparams;

    // we only support single image per batch
    const clip_image_f32 & img;

    const int patch_size;
    const int n_patches_x;
    const int n_patches_y;
    const int n_patches;
    const int n_embd;
    const int n_head;
    const int d_head;
    const int n_layer;
    const float eps;
    const float kq_scale;

    ggml_context_ptr ctx0_ptr;
    ggml_context * ctx0;
    ggml_cgraph * gf;

    clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
            ctx(ctx),
            model(ctx->model),
            hparams(model.hparams),
            img(img),
            patch_size(hparams.patch_size),
            n_patches_x(img.nx / patch_size),
            n_patches_y(img.ny / patch_size),
            n_patches(n_patches_x * n_patches_y),
            n_embd(hparams.n_embd),
            n_head(hparams.n_head),
            d_head(n_embd / n_head),
            n_layer(hparams.n_layer),
            eps(hparams.eps),
            kq_scale(1.0f / sqrtf((float)d_head)) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx->buf_compute_meta.size(),
            /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
            /*.no_alloc   =*/ true,
        };
        ctx0_ptr.reset(ggml_init(params));
        ctx0 = ctx0_ptr.get();
        gf = ggml_new_graph_custom(ctx0, ctx->max_nodes, false);
    }
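
    // [illustrative sketch, not part of the build] The constructor above uses
    // the standard ggml "measure" pattern: ctx0 is created with no_alloc = true
    // over a pre-sized metadata buffer, so tensors made while building the
    // graph carry shapes only; device memory is assigned later by the backend
    // scheduler. A minimal, self-contained version of the pattern:
#if 0
    static ggml_cgraph * build_toy_graph(ggml_context * ctx0) {
        // with no_alloc = true, these tensors have no data buffer yet
        ggml_tensor * a  = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 8);
        ggml_tensor * b  = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 8);
        ggml_cgraph  * gf = ggml_new_graph(ctx0);
        ggml_build_forward_expand(gf, ggml_add(ctx0, a, b));
        return gf; // memory is assigned later, e.g. via ggml_backend_sched_alloc_graph()
    }
#endif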
    ggml_cgraph * build_siglip() {
        ggml_tensor * inp = build_inp();

        ggml_tensor * learned_pos_embd = model.position_embeddings;
        if (ctx->proj_type() == PROJECTOR_TYPE_LFM2) {
            learned_pos_embd = resize_position_embeddings();
        }

        ggml_tensor * cur = build_vit(
                                inp, n_patches,
                                NORM_TYPE_NORMAL,
                                hparams.ffn_op,
                                learned_pos_embd,
                                nullptr);

        if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) {
            const int batch_size = 1;
            GGML_ASSERT(n_patches_x == n_patches_y);
            const int patches_per_image = n_patches_x;
            const int kernel_size = hparams.n_merge;

            cur = ggml_transpose(ctx0, cur);
            cur = ggml_cont_4d(ctx0, cur, patches_per_image, patches_per_image, n_embd, batch_size);

            // doing a pool2d to reduce the number of output tokens
            cur = ggml_pool_2d(ctx0, cur, GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0);
            cur = ggml_reshape_3d(ctx0, cur, cur->ne[0] * cur->ne[0], n_embd, batch_size);
            cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));

            // apply norm before projection
            cur = ggml_rms_norm(ctx0, cur, eps);
            cur = ggml_mul(ctx0, cur, model.mm_soft_emb_norm_w);

            // apply projection
            cur = ggml_mul_mat(ctx0,
                ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)),
                cur);

        } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3) {
            // pixel_shuffle
            // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578
            const int scale_factor = model.hparams.n_merge;
            cur = build_patch_merge_permute(cur, scale_factor);
            cur = ggml_mul_mat(ctx0, model.projection, cur);

        } else if (ctx->proj_type() == PROJECTOR_TYPE_LFM2) {
            // pixel unshuffle block
            const int scale_factor = model.hparams.n_merge;
            cur = build_patch_merge_permute(cur, scale_factor);

            // projection
            cur = ggml_norm(ctx0, cur, 1e-5); // default nn.LayerNorm
            cur = ggml_mul(ctx0, cur, model.mm_input_norm_w);
            cur = ggml_add(ctx0, cur, model.mm_input_norm_b);
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_1_b);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_2_b);

        } else if (ctx->proj_type() == PROJECTOR_TYPE_JANUS_PRO) {
            cur = build_ffn(cur,
                model.mm_0_w, model.mm_0_b,
                nullptr, nullptr,
                model.mm_1_w, model.mm_1_b,
                hparams.ffn_op,
                -1);

        } else {
            GGML_ABORT("SigLIP: Unsupported projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
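
    // [illustrative worked example] For the gemma3 branch above: with a 64x64
    // patch grid (e.g. an 896-pixel image with patch_size 14) and
    // kernel_size = n_merge = 4, the average pool with stride == kernel size
    // reduces the grid to 16x16, i.e. 4096 patches -> 256 output tokens, which
    // are then RMS-normed and projected into the text embedding space. The
    // numbers are hypothetical; actual sizes come from the model hparams.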
    ggml_cgraph * build_pixtral() {
        const int n_merge = hparams.n_merge;

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            return build_rope_2d(ctx0, cur, pos_h, pos_w, hparams.rope_theta, true);
        };

        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
                                inp, n_patches,
                                NORM_TYPE_RMS,
                                hparams.ffn_op,
                                nullptr, // no learned pos embd
                                add_pos);

        // mistral small 3.1 patch merger
        // ref: https://github.com/huggingface/transformers/blob/7a3e208892c06a5e278144eaf38c8599a42f53e7/src/transformers/models/mistral3/modeling_mistral3.py#L67
        if (model.mm_patch_merger_w) {
            GGML_ASSERT(hparams.n_merge > 0);

            cur = ggml_mul(ctx0, ggml_rms_norm(ctx0, cur, eps), model.mm_input_norm_w);

            // reshape image tokens to 2D grid
            cur = ggml_reshape_3d(ctx0, cur, n_embd, n_patches_x, n_patches_y);
            cur = ggml_permute(ctx0, cur, 2, 0, 1, 3); // [x, y, n_embd]
            cur = ggml_cont(ctx0, cur);

            // torch.nn.functional.unfold is just an im2col under the hood
            // we just need a dummy kernel to make it work
            ggml_tensor * kernel = ggml_view_3d(ctx0, cur, n_merge, n_merge, cur->ne[2], 0, 0, 0);
            cur = ggml_im2col(ctx0, kernel, cur, n_merge, n_merge, 0, 0, 1, 1, true, inp->type);

            // project to n_embd
            cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
            cur = ggml_mul_mat(ctx0, model.mm_patch_merger_w, cur);
        }

        // LlavaMultiModalProjector (always using GELU activation)
        {
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            if (model.mm_1_b) {
                cur = ggml_add(ctx0, cur, model.mm_1_b);
            }

            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            if (model.mm_2_b) {
                cur = ggml_add(ctx0, cur, model.mm_2_b);
            }
        }

        // arrangement of the [IMG_BREAK] token
        if (model.token_embd_img_break) {
            // not efficient, but works
            // the trick is to view the embeddings as a 3D tensor with shape [n_embd, n_patches_per_row, n_rows]
            // and then concatenate the [IMG_BREAK] token to the end of each row, aka n_patches_per_row dimension
            // after the concatenation, we have a tensor with shape [n_embd, n_patches_per_row + 1, n_rows]

            const int p_y             = n_merge > 0 ? n_patches_y / n_merge : n_patches_y;
            const int p_x             = n_merge > 0 ? n_patches_x / n_merge : n_patches_x;
            const int p_total         = p_x * p_y;
            const int n_embd_text     = cur->ne[0];
            const int n_tokens_output = p_total + p_y - 1; // one [IMG_BREAK] per row, except the last row

            ggml_tensor * tmp = ggml_reshape_3d(ctx0, cur, n_embd_text, p_x, p_y);
            ggml_tensor * tok = ggml_new_tensor_3d(ctx0, tmp->type, n_embd_text, 1, p_y);
            tok = ggml_scale(ctx0, tok, 0.0); // clear the tensor
            tok = ggml_add(ctx0, tok, model.token_embd_img_break);
            tmp = ggml_concat(ctx0, tmp, tok, 1);
            cur = ggml_view_2d(ctx0, tmp,
                n_embd_text, n_tokens_output,
                ggml_row_size(tmp->type, n_embd_text), 0);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);

        return gf;
    }
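
    // [illustrative worked example] Shape walk-through of the [IMG_BREAK]
    // arrangement above, with hypothetical p_x = 4, p_y = 3: the embeddings
    // are viewed as [n_embd, 4, 3]; a [n_embd, 1, 3] tensor holding the
    // IMG_BREAK embedding is concatenated along dim 1, giving [n_embd, 5, 3]
    // (15 tokens in row-major order); the final 2D view keeps the first
    // p_total + p_y - 1 = 14 tokens, dropping only the trailing break that
    // would follow the last row.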
    // Qwen2VL and Qwen2.5VL use M-RoPE
    ggml_cgraph * build_qwen2vl() {
        GGML_ASSERT(model.patch_bias == nullptr);
        GGML_ASSERT(model.class_embedding == nullptr);

        const int batch_size       = 1;
        const bool use_window_attn = hparams.n_wa_pattern > 0;
        const int n_wa_pattern     = hparams.n_wa_pattern;
        const int n_pos            = n_patches;
        const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position

        norm_type norm_t = ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL
            ? NORM_TYPE_RMS     // qwen 2.5 vl
            : NORM_TYPE_NORMAL; // qwen 2 vl

        int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};

        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

        GGML_ASSERT(img.nx % (patch_size * 2) == 0);
        GGML_ASSERT(img.ny % (patch_size * 2) == 0);

        // second conv dimension
        {
            auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
            inp = ggml_add(ctx0, inp, inp_1);

            inp = ggml_permute(ctx0, inp, 1, 2, 0, 3); // [w, h, c, b] -> [c, w, h, b]
            inp = ggml_cont_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
            inp = ggml_permute(ctx0, inp, 0, 2, 1, 3);
            inp = ggml_cont_3d(
                ctx0, inp,
                n_embd, n_patches_x * n_patches_y, batch_size);
        }
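
        // [illustrative note] Net effect of the permute/reshape sequence
        // above: the conv output is row-major over the patch grid, but the
        // 2x2 spatial merge wants each group of 4 consecutive embeddings to
        // come from one 2x2 patch block. E.g. on a hypothetical 4x2 grid,
        // row-major order (0,0)(1,0)(2,0)(3,0)(0,1)(1,1)(2,1)(3,1) becomes
        // (0,0)(1,0)(0,1)(1,1) (2,0)(3,0)(2,1)(3,1), i.e. block by block.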
        ggml_tensor * inpL           = inp;
        ggml_tensor * window_mask    = nullptr;
        ggml_tensor * window_idx     = nullptr;
        ggml_tensor * inv_window_idx = nullptr;

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
        }

        if (use_window_attn) {
            // handle window attention inputs
            inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(inv_window_idx, "inv_window_idx");
            ggml_set_input(inv_window_idx);
            // mask for window attention
            window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_pos, n_pos);
            ggml_set_name(window_mask, "window_mask");
            ggml_set_input(window_mask);

            // if flash attn is used, we need to pad the mask and cast to f16
            if (ctx->flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) {
                window_mask = ggml_cast(ctx0, window_mask, GGML_TYPE_F16);
            }

            // inpL shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            inpL = ggml_reshape_2d(ctx0, inpL, n_embd * 4, n_patches_x * n_patches_y * batch_size / 4);
            inpL = ggml_get_rows(ctx0, inpL, inv_window_idx);
            inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_patches_x * n_patches_y, batch_size);
        }

        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            const auto & layer = model.layers[il];
            const bool full_attn = use_window_attn ? (il + 1) % n_wa_pattern == 0 : true;

            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "ln1", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.q_w, cur), layer.q_b);
                ggml_tensor * Kcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.k_w, cur), layer.k_b);
                ggml_tensor * Vcur = ggml_add(ctx0,
                    ggml_mul_mat(ctx0, layer.v_w, cur), layer.v_b);

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_patches);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_patches);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_patches);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                // apply M-RoPE
                Qcur = ggml_rope_multi(
                    ctx0, Qcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
                Kcur = ggml_rope_multi(
                    ctx0, Kcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);

                cb(Qcur, "Qcur_rope", il);
                cb(Kcur, "Kcur_rope", il);

                ggml_tensor * attn_mask = full_attn ? nullptr : window_mask;

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, attn_mask, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            // re-add the layer input, i.e. the residual connection
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states
            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);

            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
        }

        // multimodal projection
        ggml_tensor * embeddings = inpL;
        embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);

        embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

        // GELU activation
        embeddings = ggml_gelu(ctx0, embeddings);

        // Second linear layer
        embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
        embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);

        if (use_window_attn) {
            window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
            ggml_set_name(window_idx, "window_idx");
            ggml_set_input(window_idx);

            // embeddings shape: [n_embd, n_patches_x * n_patches_y, batch_size]
            GGML_ASSERT(batch_size == 1);
            embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4);
            embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
            embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4, batch_size);
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
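
    // [illustrative note] The window-attention bookkeeping above operates on
    // merged 2x2 token groups: inpL is viewed as rows of n_embd * 4 (one row
    // per merged token) and gathered with inv_window_idx so tokens of the same
    // attention window become contiguous, letting window_mask act
    // block-diagonally; after the projector, gathering with window_idx (the
    // inverse permutation) restores the original spatial order.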
    // Qwen3VL
    ggml_cgraph * build_qwen3vl() {
        GGML_ASSERT(model.patch_bias != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);
        GGML_ASSERT(model.class_embedding == nullptr);

        const int batch_size       = 1;
        const int n_pos            = n_patches;
        const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position

        norm_type norm_t = NORM_TYPE_NORMAL;

        int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};

        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

        GGML_ASSERT(img.nx % (patch_size * 2) == 0);
        GGML_ASSERT(img.ny % (patch_size * 2) == 0);

        // second conv dimension
        {
            auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
            inp = ggml_add(ctx0, inp, inp_1);

            inp = ggml_permute(ctx0, inp, 1, 2, 0, 3); // [w, h, c, b] -> [c, w, h, b]
            inp = ggml_cont_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
            inp = ggml_reshape_4d(
                ctx0, inp,
                n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
            inp = ggml_permute(ctx0, inp, 0, 2, 1, 3);
            inp = ggml_cont_3d(
                ctx0, inp,
                n_embd, n_patches_x * n_patches_y, batch_size);
        }

        // add patch bias
        if (model.patch_bias != nullptr) {
            inp = ggml_add(ctx0, inp, model.patch_bias);
            cb(inp, "patch_bias", -1);
        }

        // calculate absolute position embedding and apply
        ggml_tensor * learned_pos_embd = resize_position_embeddings();
        learned_pos_embd = ggml_cont_4d(
            ctx0, learned_pos_embd,
            n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
        learned_pos_embd = ggml_reshape_4d(
            ctx0, learned_pos_embd,
            n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
        learned_pos_embd = ggml_permute(ctx0, learned_pos_embd, 0, 2, 1, 3);
        learned_pos_embd = ggml_cont_3d(
            ctx0, learned_pos_embd,
            n_embd, n_patches_x * n_patches_y, batch_size);
        inp = ggml_add(ctx0, inp, learned_pos_embd);
        cb(inp, "inp_pos_emb", -1);

        ggml_tensor * inpL = inp;

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
        }

        // deepstack features (stacked along the feature dimension),
        // shape: [n_embd * len(deepstack_layers), n_patches_x * n_patches_y, batch_size]
        ggml_tensor * deepstack_features = nullptr;
        const int merge_factor = hparams.n_merge > 0 ? hparams.n_merge * hparams.n_merge : 4; // default 2x2=4 for qwen3vl

        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];

            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "ln1", il);

            // self-attention
            {
                cur = ggml_mul_mat(ctx0, layer.qkv_w, cur);
                cur = ggml_add(ctx0, cur, layer.qkv_b);

                ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ 0);
                ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ ggml_row_size(cur->type, n_embd));
                ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ ggml_row_size(cur->type, 2 * n_embd));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                // apply M-RoPE
                Qcur = ggml_rope_multi(
                    ctx0, Qcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
                Kcur = ggml_rope_multi(
                    ctx0, Kcur, positions, nullptr,
                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);

                cb(Qcur, "Qcur_rope", il);
                cb(Kcur, "Kcur_rope", il);

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            // re-add the layer input, i.e. the residual connection
            cur = ggml_add(ctx0, cur, inpL);

            inpL = cur; // inpL = residual, cur = hidden_states
            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);

            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            if (layer.has_deepstack()) {
                ggml_tensor * feat = ggml_reshape_3d(ctx0, cur, n_embd * merge_factor, n_pos / merge_factor, batch_size);
                feat = build_norm(feat, layer.deepstack_norm_w, layer.deepstack_norm_b, norm_t, eps, il);
                feat = build_ffn(feat,
                    layer.deepstack_fc1_w, layer.deepstack_fc1_b,
                    nullptr, nullptr,
                    layer.deepstack_fc2_w, layer.deepstack_fc2_b,
                    ffn_op_type::FFN_GELU, il);

                if (!deepstack_features) {
                    deepstack_features = feat;
                } else {
                    // concat along the feature dimension
                    deepstack_features = ggml_concat(ctx0, deepstack_features, feat, 0);
                }
            }

            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
        }

        // multimodal projection
        ggml_tensor * embeddings = inpL;
        embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);

        embeddings = build_ffn(embeddings,
            model.mm_0_w, model.mm_0_b,
            nullptr, nullptr,
            model.mm_1_w, model.mm_1_b,
            ffn_op_type::FFN_GELU, -1);

        embeddings = ggml_concat(ctx0, embeddings, deepstack_features, 0); // concat along the feature dimension

        // build the graph
        ggml_build_forward_expand(gf, embeddings);

        return gf;
    }
    ggml_cgraph * build_minicpmv() {
        GGML_ASSERT(model.class_embedding == nullptr);
        const int n_pos = n_patches;
        const int n_embd_proj = clip_n_mmproj_embd(ctx);

        // position embeddings for the projector (not for ViT)
        // see: https://huggingface.co/openbmb/MiniCPM-o-2_6/blob/main/resampler.py#L70

        // base frequency omega
        ggml_tensor * omega = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_embd_proj / 4);
        ggml_set_name(omega, "omega");
        ggml_set_input(omega);

        // 2D input positions (using float for sinusoidal embeddings)
        ggml_tensor * pos_h = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_pos);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_pos);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        // for selecting learned pos embd, used by ViT
        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        ggml_tensor * learned_pos_embd = ggml_get_rows(ctx0, model.position_embeddings, positions);

        ggml_tensor * inp = build_inp();
        ggml_tensor * embeddings = build_vit(
            inp, n_pos,
            NORM_TYPE_NORMAL,
            hparams.ffn_op,
            learned_pos_embd,
            nullptr);

        // resampler projector (it is just another transformer)
        ggml_tensor * q = model.mm_model_query;
        ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);

        // norm
        q = build_norm(q, model.mm_model_ln_q_w, model.mm_model_ln_q_b, NORM_TYPE_NORMAL, eps, -1);
        v = build_norm(v, model.mm_model_ln_kv_w, model.mm_model_ln_kv_b, NORM_TYPE_NORMAL, eps, -1);

        // calculate sinusoidal pos embd
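        // sketch of the math below (omega is filled host-side; presumably the usual
        // 1/theta^(2i/d) sinusoidal schedule from the reference resampler):
        //   theta_x = pos_w * omega, theta_y = pos_h * omega   (outer products)
        //   pos_embed = [sin(theta_x), cos(theta_x), sin(theta_y), cos(theta_y)]
        // which yields n_embd_proj features per position (4 * n_embd_proj/4)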
        ggml_tensor * pos_embed = nullptr;
        {
            // outer product
            ggml_tensor * omega_b = ggml_repeat_4d(ctx0, omega, omega->ne[0], n_pos, 1, 1); // n_pos rows
            ggml_tensor * theta_x = ggml_mul(ctx0, omega_b, pos_w);
            ggml_tensor * theta_y = ggml_mul(ctx0, omega_b, pos_h);

            // sin and cos
            ggml_tensor * pos_embd_x = ggml_concat(
                ctx0,
                ggml_sin(ctx0, theta_x),
                ggml_cos(ctx0, theta_x),
                0 // concat on first dim
            );
            ggml_tensor * pos_embd_y = ggml_concat(
                ctx0,
                ggml_sin(ctx0, theta_y),
                ggml_cos(ctx0, theta_y),
                0 // concat on first dim
            );
            pos_embed = ggml_concat(ctx0, pos_embd_x, pos_embd_y, 0);
        }

        // k = v + pos_embed
        ggml_tensor * k = ggml_add(ctx0, v, pos_embed);

        // attention
        {
            const int d_head = 128;
            int n_head = n_embd_proj / d_head;
            // use the actual config value if available, otherwise fall back to hardcoded values
            int num_query = ctx->model.hparams.minicpmv_query_num;

            ggml_tensor * Q = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q),
                model.mm_model_attn_q_b);
            ggml_tensor * K = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k),
                model.mm_model_attn_k_b);
            ggml_tensor * V = ggml_add(ctx0,
                ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v),
                model.mm_model_attn_v_b);

            Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_query);
            K = ggml_reshape_3d(ctx0, K, d_head, n_head, n_pos);
            V = ggml_reshape_3d(ctx0, V, d_head, n_head, n_pos);

            cb(Q, "resampler_Q", -1);
            cb(K, "resampler_K", -1);
            cb(V, "resampler_V", -1);

            float resampler_kq_scale = 1.0f / sqrtf((float) d_head);
            embeddings = build_attn(
                model.mm_model_attn_o_w,
                model.mm_model_attn_o_b,
                Q, K, V, nullptr, resampler_kq_scale, -1);
            cb(embeddings, "resampler_attn_out", -1);
        }

        // layernorm
        embeddings = build_norm(embeddings, model.mm_model_ln_post_w, model.mm_model_ln_post_b, NORM_TYPE_NORMAL, eps, -1);

        // projection
        embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);

        // build the graph
        ggml_build_forward_expand(gf, embeddings);
        return gf;
    }
    ggml_cgraph * build_internvl() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1;
        ggml_tensor * inp = build_inp();

        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // the larger models use a different ViT, which uses RMS norm instead of layer norm
        // ref: https://github.com/ggml-org/llama.cpp/pull/13443#issuecomment-2869786188
        norm_type norm_t = (hparams.n_embd == 3200 && hparams.n_layer == 45)
            ? NORM_TYPE_RMS     // 6B ViT (used by InternVL 2.5/3 - 26B, 38B, 78B)
            : NORM_TYPE_NORMAL; // 300M ViT (used by all smaller InternVL models)

        ggml_tensor * cur = build_vit(
            inp, n_pos,
            norm_t,
            hparams.ffn_op,
            model.position_embeddings,
            nullptr);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
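        // pixel shuffle trades spatial resolution for channel depth:
        // with scale factor s, (n_embd, W, H) -> (n_embd*s*s, W/s, H/s),
        // e.g. s = 2 folds each 2x2 window of patches into a single
        // position carrying 4x the features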
        {
            const int scale_factor = model.hparams.n_merge;
            const int bsz    = 1; // batch size, always 1 for now since we don't support batching
            const int height = n_patches_y;
            const int width  = n_patches_x;
            GGML_ASSERT(scale_factor > 0);
            cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, height / scale_factor, width, bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_cont_4d(ctx0, cur,
                n_embd * scale_factor * scale_factor,
                height / scale_factor,
                width / scale_factor,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            // flatten to 2D
            cur = ggml_cont_2d(ctx0, cur,
                n_embd * scale_factor * scale_factor,
                cur->ne[1] * cur->ne[2]);
        }

        // projector (always using GELU activation)
        {
            // projector LayerNorm uses pytorch's default eps = 1e-5
            // ref: https://huggingface.co/OpenGVLab/InternVL3-8B-Instruct/blob/a34d3e4e129a5856abfd6aa6de79776484caa14e/modeling_internvl_chat.py#L79
            cur = build_norm(cur, model.mm_0_w, model.mm_0_b, NORM_TYPE_NORMAL, 1e-5, -1);
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_1_b);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_3_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_3_b);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);
        return gf;
    }
    ggml_cgraph * build_llama4() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1; // +1 for [CLS]

        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        ggml_tensor * inp = build_inp_raw();

        // Llama4UnfoldConvolution
        {
            ggml_tensor * kernel = ggml_reshape_4d(ctx0, model.patch_embeddings_0,
                patch_size, patch_size, 3, n_embd);
            inp = ggml_im2col(ctx0, kernel, inp, patch_size, patch_size, 0, 0, 1, 1, true, inp->type);
            inp = ggml_mul_mat(ctx0, model.patch_embeddings_0, inp);
            inp = ggml_reshape_2d(ctx0, inp, n_embd, n_patches);
            cb(inp, "patch_conv", -1);
        }

        // add CLS token
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        // build ViT with 2D position embeddings
        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            // first half is X axis and second half is Y axis
            // ref: https://github.com/huggingface/transformers/blob/40a493c7ed4f19f08eadb0639cf26d49bfa5e180/src/transformers/models/llama4/modeling_llama4.py#L1312
            // ref: https://github.com/Blaizzy/mlx-vlm/blob/a57156aa87b33cca6e5ee6cfc14dd4ef8f611be6/mlx_vlm/models/llama4/vision.py#L441
            return build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
        };

        ggml_tensor * cur = build_vit(
            inp, n_pos,
            NORM_TYPE_NORMAL,
            hparams.ffn_op,
            model.position_embeddings,
            add_pos);

        // remove CLS token
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_patches,
            ggml_row_size(cur->type, n_embd), 0);

        // pixel shuffle
        // based on Llama4VisionPixelShuffleMLP
        // https://github.com/huggingface/transformers/blob/2932f318a20d9e54cc7aea052e040164d85de7d6/src/transformers/models/llama4/modeling_llama4.py#L1151
        {
            const int scale_factor = model.hparams.n_merge;
            const int bsz = 1; // batch size, always 1 for now since we don't support batching
            GGML_ASSERT(scale_factor > 0);
            GGML_ASSERT(n_patches_x == n_patches_y); // llama4 only supports square images
            cur = ggml_reshape_4d(ctx0, cur,
                n_embd * scale_factor,
                n_patches_x / scale_factor,
                n_patches_y,
                bsz);
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            cur = ggml_cont_4d(ctx0, cur,
                n_embd * scale_factor * scale_factor,
                n_patches_x / scale_factor,
                n_patches_y / scale_factor,
                bsz);
            //cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
            // flatten to 2D
            cur = ggml_cont_2d(ctx0, cur,
                n_embd * scale_factor * scale_factor,
                n_patches / scale_factor / scale_factor);
            cb(cur, "pixel_shuffle", -1);
        }

        // based on Llama4VisionMLP2 (always uses GELU activation, no bias)
        {
            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, cur);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, cur);
            cur = ggml_gelu(ctx0, cur);
            cb(cur, "adapter_mlp", -1);
        }

        // Llama4MultiModalProjector
        cur = ggml_mul_mat(ctx0, model.mm_model_proj, cur);
        cb(cur, "projected", -1);

        // build the graph
        ggml_build_forward_expand(gf, cur);
        return gf;
    }
    ggml_cgraph * build_kimivl() {
        // 2D input positions
        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_h, "pos_h");
        ggml_set_input(pos_h);

        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
        ggml_set_name(pos_w, "pos_w");
        ggml_set_input(pos_w);

        ggml_tensor * learned_pos_embd = resize_position_embeddings();

        // build ViT with 2D position embeddings
        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
            // first half is X axis and second half is Y axis
            return build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
        };

        ggml_tensor * inp = build_inp();
        ggml_tensor * cur = build_vit(
            inp, n_patches,
            NORM_TYPE_NORMAL,
            hparams.ffn_op,
            learned_pos_embd,
            add_pos);
        cb(cur, "vit_out", -1);

        {
            // patch_merger
            const int scale_factor = model.hparams.n_merge;
            cur = build_patch_merge_permute(cur, scale_factor);

            // projection norm
            int proj_inp_dim = cur->ne[0];
            cur = ggml_view_2d(ctx0, cur,
                n_embd, cur->ne[1] * scale_factor * scale_factor,
                ggml_row_size(cur->type, n_embd), 0);
            cur = ggml_norm(ctx0, cur, 1e-5); // default nn.LayerNorm
            cur = ggml_mul(ctx0, cur, model.mm_input_norm_w);
            cur = ggml_add(ctx0, cur, model.mm_input_norm_b);
            cur = ggml_view_2d(ctx0, cur,
                proj_inp_dim, cur->ne[1] / scale_factor / scale_factor,
                ggml_row_size(cur->type, proj_inp_dim), 0);
            cb(cur, "proj_inp_normed", -1);

            // projection mlp
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_1_b);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_2_b);
            cb(cur, "proj_out", -1);
        }

        // build the graph
        ggml_build_forward_expand(gf, cur);
        return gf;
    }
    // this graph is used by llava, granite and glm
    // due to having embedding_stack (used by granite), we cannot reuse build_vit
    ggml_cgraph * build_llava() {
        const int batch_size = 1;
        const int n_pos = n_patches + (model.class_embedding ? 1 : 0);

        GGML_ASSERT(n_patches_x == n_patches_y && "only square images supported");

        // calculate the deepest feature layer based on hparams and projector type
        int max_feature_layer = n_layer;
        {
            // get the index of the second to last layer; this is the default for models that have a llava projector
            int il_last = hparams.n_layer - 1;
            int deepest_feature_layer = -1;

            if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV || ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
                il_last += 1;
            }

            // if we set explicit vision feature layers, only go up to the deepest one
            // NOTE: only used by granite-vision models for now
            for (const auto & feature_layer : hparams.vision_feature_layer) {
                if (feature_layer > deepest_feature_layer) {
                    deepest_feature_layer = feature_layer;
                }
            }
            max_feature_layer = deepest_feature_layer < 0 ? il_last : deepest_feature_layer;
        }
        ggml_tensor * inp = build_inp();

        // concat class_embeddings and patch_embeddings
        if (model.class_embedding) {
            inp = ggml_concat(ctx0, inp, model.class_embedding, 1);
        }

        ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
        ggml_set_name(positions, "positions");
        ggml_set_input(positions);

        inp = ggml_add(ctx0, inp, ggml_get_rows(ctx0, model.position_embeddings, positions));

        ggml_tensor * inpL = inp;

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, NORM_TYPE_NORMAL, eps, -1);
            cb(inpL, "pre_ln", -1);
        }

        std::vector<ggml_tensor *> embedding_stack;
        const auto & vision_feature_layer = hparams.vision_feature_layer;

        // loop over layers
        for (int il = 0; il < max_feature_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // if this is an embedding feature layer, save the output
            // NOTE: 0 index here refers to the input to the encoder
            if (vision_feature_layer.find(il) != vision_feature_layer.end()) {
                embedding_stack.push_back(cur);
            }

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "layer_inp_normed", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }
                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }
                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            // re-add the layer input, i.e. the residual
            cur = ggml_add(ctx0, cur, inpL);
            inpL = cur; // inpL = residual, cur = hidden_states
            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);
            cb(cur, "ffn_out", il);

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }
        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, NORM_TYPE_NORMAL, eps, -1);
        }

        ggml_tensor * embeddings = inpL;

        // process vision feature layers (used by granite)
        {
            // final layer is a vision feature layer
            if (vision_feature_layer.find(max_feature_layer) != vision_feature_layer.end()) {
                embedding_stack.push_back(inpL);
            }

            // if feature layers are explicitly set, stack them (if we have multiple)
            if (!embedding_stack.empty()) {
                embeddings = embedding_stack[0];
                for (size_t i = 1; i < embedding_stack.size(); i++) {
                    embeddings = ggml_concat(ctx0, embeddings, embedding_stack[i], 0);
                }
            }
        }

        // llava projector (also used by granite)
        if (ctx->model.hparams.has_llava_projector) {
            embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);

            ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
            ggml_set_name(patches, "patches");
            ggml_set_input(patches);

            // shape [1, 576, 1024]
            // ne is whcn, ne = [1024, 576, 1, 1]
            embeddings = ggml_get_rows(ctx0, embeddings, patches);

            // print_tensor_info(embeddings, "embeddings");

            // llava projector
            if (ctx->proj_type() == PROJECTOR_TYPE_MLP) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

                embeddings = ggml_gelu(ctx0, embeddings);
                if (model.mm_2_w) {
                    embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
                    embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
                }
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_MLP_NORM) {
                embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
                // ggml_tensor_printf(embeddings, "mm_0_w", 0, true, false);

                // first LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),
                    model.mm_1_b);

                // GELU activation
                embeddings = ggml_gelu(ctx0, embeddings);

                // second linear layer
                embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
                embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);

                // second LayerNorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
                    model.mm_4_b);
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_LDP) {
                // MobileVLM projector
                int n_patch = 24;

                ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
                mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
                mlp_1 = ggml_gelu(ctx0, mlp_1);
                ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
                mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
                // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]

                // block 1
                ggml_tensor * block_1 = nullptr;
                {
                    // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
                    mlp_3 = ggml_permute(ctx0, mlp_3, 1, 0, 2, 3);
                    mlp_3 = ggml_cont_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
                    // stride = 1, padding = 1, bias is nullptr
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);

                    // layer norm
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]

                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);

                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]

                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);

                    // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
                    // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]

                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
                    // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]

                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]

                    // residual
                    block_1 = ggml_add(ctx0, mlp_3, block_1);
                }

                // block_2
                {
                    // stride = 2
                    block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]

                    // layer norm
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                    // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]

                    // hardswish
                    ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);

                    // not sure the parameters are right for globalAvgPooling
                    block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                    // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]

                    // pointwise conv
                    block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
                    block_1 = ggml_relu(ctx0, block_1);
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
                    block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
                    block_1 = ggml_hardsigmoid(ctx0, block_1);

                    // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                    block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                    block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                    int w = block_1->ne[0], h = block_1->ne[1];
                    block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                    block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
                    // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
                    block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
                    block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
                    // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]

                    block_1 = ggml_norm(ctx0, block_1, eps);
                    block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
                    block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
                    // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
                }
                embeddings = block_1;
            }
            else if (ctx->proj_type() == PROJECTOR_TYPE_LDPV2) {
                int n_patch = 24;

                ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
                mlp_0 = ggml_gelu(ctx0, mlp_0);
                ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
                mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
                // mlp_2 ne = [2048, 576, 1, 1]

                // AVG pool layer 2*2, strides = 2
                mlp_2 = ggml_permute(ctx0, mlp_2, 1, 0, 2, 3);
                // mlp_2 ne = [576, 2048, 1, 1]
                mlp_2 = ggml_cont_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
                // mlp_2 ne = [24, 24, 2048, 1]
                mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);

                // weight ne = [3, 3, 2048, 1]
                ggml_tensor * peg_0 = ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
                peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
                mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
                peg_0 = ggml_add(ctx0, peg_0, mlp_2);
                peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
                embeddings = peg_0;
            }
            else {
                GGML_ABORT("fatal error");
            }
        }
        // glm projector
        else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
            size_t gridsz = (size_t) sqrt(embeddings->ne[1]);
            embeddings = ggml_permute(ctx0, embeddings, 1, 0, 2, 3);
            embeddings = ggml_cont_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
            embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
            embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0] * embeddings->ne[1], embeddings->ne[2], batch_size);
            embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
            embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);

            // GLU
            {
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
                embeddings = ggml_gelu_inplace(ctx0, embeddings);
                ggml_tensor * x = embeddings;
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
                x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, x);
                embeddings = ggml_swiglu_split(ctx0, embeddings, x);
                embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
            }

            // arrangement of BOI/EOI token embeddings
            // note: these embeddings are not present in the text model, hence we cannot process them as text tokens
            // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
            {
                embeddings = ggml_concat(ctx0, model.mm_boi, embeddings, 1); // BOI
                embeddings = ggml_concat(ctx0, embeddings, model.mm_eoi, 1); // EOI
            }
        }
        else {
            GGML_ABORT("llava: unknown projector type");
        }

        // build the graph
        ggml_build_forward_expand(gf, embeddings);
        return gf;
    }
    // whisper encoder with custom projector
    ggml_cgraph * build_whisper_enc() {
        const int n_frames = img.nx;
        const int n_pos = n_frames / 2;
        GGML_ASSERT(model.position_embeddings->ne[1] >= n_pos);

        ggml_tensor * inp = build_inp_raw(1);

        // conv1d block
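        // the two conv1d layers below (stride 1, then stride 2) halve the time
        // axis, which is why n_pos = n_frames / 2 above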
        {
            // convolution + gelu
            ggml_tensor * cur = ggml_conv_1d_ph(ctx0, model.conv1d_1_w, inp, 1, 1);
            cur = ggml_add(ctx0, cur, model.conv1d_1_b);
            cur = ggml_gelu_erf(ctx0, cur);

            cur = ggml_conv_1d_ph(ctx0, model.conv1d_2_w, cur, 2, 1);
            cur = ggml_add(ctx0, cur, model.conv1d_2_b);
            cur = ggml_gelu_erf(ctx0, cur);

            // transpose
            inp = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
            cb(inp, "after_conv1d", -1);
        }

        // sanity check (only check one layer, but it should be the same for all)
        GGML_ASSERT(model.layers[0].ln_1_w && model.layers[0].ln_1_b);
        GGML_ASSERT(model.layers[0].ln_2_w && model.layers[0].ln_2_b);
        GGML_ASSERT(model.layers[0].q_b);
        GGML_ASSERT(model.layers[0].v_b);
        GGML_ASSERT(!model.layers[0].k_b); // no bias for k
        GGML_ASSERT(model.post_ln_w && model.post_ln_b);

        ggml_tensor * pos_embd_selected = ggml_view_2d(
            ctx0, model.position_embeddings,
            model.position_embeddings->ne[0], n_pos,
            model.position_embeddings->nb[1], 0
        );
        ggml_tensor * cur = build_vit(
            inp, n_pos,
            NORM_TYPE_NORMAL,
            hparams.ffn_op,
            pos_embd_selected,
            nullptr);
        cb(cur, "after_transformer", -1);

        if (model.audio_has_stack_frames()) {
            // StackAudioFrames
            // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py
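            // sketch: flatten to 1D, zero-pad to a multiple of
            // n_embd * proj_stack_factor, then view as rows of that stride;
            // e.g. with proj_stack_factor = 8, every 8 consecutive frames are
            // stacked into a single row of 8 * n_embd features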
            int64_t stride = n_embd * hparams.proj_stack_factor;
            int64_t padded_len = GGML_PAD(ggml_nelements(cur), stride);
            int64_t pad = padded_len - ggml_nelements(cur);
            if (pad > 0) {
                cur = ggml_view_1d(ctx0, cur, ggml_nelements(cur), 0);
                cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
            }
            cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
                ggml_row_size(cur->type, stride), 0);
            cb(cur, "after_stacked", -1);
        }

        if (ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX) {
            // UltravoxProjector
            // pre-norm
            cur = ggml_rms_norm(ctx0, cur, 1e-6);
            cur = ggml_mul(ctx0, cur, model.mm_norm_pre_w);

            // ffn in
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);

            // swiglu
            // see SwiGLU in ultravox_model.py: it is the second half that is passed through silu, not the first half
            cur = ggml_swiglu_swapped(ctx0, cur);

            // mid-norm
            cur = ggml_rms_norm(ctx0, cur, 1e-6);
            cur = ggml_mul(ctx0, cur, model.mm_norm_mid_w);

            // ffn out
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
        } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) {
            // projector
            cur = ggml_mul_mat(ctx0, model.mm_fc_w, cur);
            cur = ggml_add(ctx0, cur, model.mm_fc_b);
        } else if (ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL) {
            // projector
            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
            cur = ggml_gelu_erf(ctx0, cur);
            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
        } else {
            GGML_ABORT("%s: unknown projector type", __func__);
        }

        cb(cur, "projected", -1);

        ggml_build_forward_expand(gf, cur);
        return gf;
    }
    // cogvlm vision encoder
    ggml_cgraph * build_cogvlm() {
        GGML_ASSERT(model.class_embedding != nullptr);
        GGML_ASSERT(model.position_embeddings != nullptr);

        const int n_pos = n_patches + 1; // +1 for [CLS]

        // build input and concatenate class embedding
        ggml_tensor * inp = build_inp();
        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);

        inp = ggml_add(ctx0, inp, model.position_embeddings);
        cb(inp, "inp_pos", -1);

        ggml_tensor * inpL = inp;

        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL;

            cur = ggml_mul_mat(ctx0, layer.qkv_w, cur);
            cur = ggml_add(ctx0, cur, layer.qkv_b);

            ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, d_head * sizeof(float),
                cur->nb[1], 0);
            ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, d_head * sizeof(float),
                cur->nb[1], n_embd * sizeof(float));
            ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, d_head * sizeof(float),
                cur->nb[1], 2 * n_embd * sizeof(float));

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            cur = build_attn(layer.o_w, layer.o_b,
                Qcur, Kcur, Vcur, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);

            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "attn_post_norm", il);

            cur = ggml_add(ctx0, cur, inpL);
            inpL = cur;

            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                hparams.ffn_op, il);
            cb(cur, "ffn_out", il);

            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, eps, il);
            cb(cur, "ffn_post_norm", il);

            cur = ggml_add(ctx0, cur, inpL);
            cb(cur, "layer_out", il);
            inpL = cur;
        }

        // remove CLS token (like build_llama4 does)
        ggml_tensor * cur = ggml_view_2d(ctx0, inpL,
            n_embd, n_patches,
            ggml_row_size(inpL->type, n_embd), 0);

        // multiply with mm_model_proj
        cur = ggml_mul_mat(ctx0, model.mm_model_proj, cur);

        // apply layernorm (weight + bias)
        cur = build_norm(cur, model.mm_post_fc_norm_w, model.mm_post_fc_norm_b, NORM_TYPE_NORMAL, 1e-5, -1);

        // apply GELU
        cur = ggml_gelu_inplace(ctx0, cur);

        // branch 1: multiply with mm_h_to_4h_w
        ggml_tensor * h_to_4h = ggml_mul_mat(ctx0, model.mm_h_to_4h_w, cur);

        // branch 2: multiply with mm_gate_w
        ggml_tensor * gate = ggml_mul_mat(ctx0, model.mm_gate_w, cur);

        // swiglu: silu(gate) * h_to_4h
        gate = ggml_swiglu_split(ctx0, gate, h_to_4h);

        // apply mm_4h_to_h_w
        cur = ggml_mul_mat(ctx0, model.mm_4h_to_h_w, gate);

        // concatenate with boi and eoi
        cur = ggml_concat(ctx0, model.mm_boi, cur, 1);
        cur = ggml_concat(ctx0, cur, model.mm_eoi, 1);

        // build the graph
        ggml_build_forward_expand(gf, cur);
        return gf;
    }
private:
    //
    // utility functions
    //

    void cb(ggml_tensor * cur0, const char * name, int il) const {
        if (ctx->debug_graph) {
            ggml_tensor * cur = ggml_cpy(ctx0, cur0, ggml_dup_tensor(ctx0, cur0));
            std::string cur_name = il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
            ggml_set_name(cur, cur_name.c_str());
            ggml_set_output(cur);
            ggml_build_forward_expand(gf, cur);
            ctx->debug_print_tensors.push_back(cur);
        }
    }
    // siglip2 naflex
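    // NaFlex-style checkpoints store a square grid of learned position embeddings;
    // since the input may have any aspect ratio, the grid is bilinearly resized
    // (with anti-aliasing) to the actual (width, height) patch grid of this image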
    ggml_tensor * resize_position_embeddings() {
        ggml_tensor * pos_embd = model.position_embeddings;
        GGML_ASSERT(pos_embd); // must exist before we can inspect its shape

        const int height = img.ny / patch_size;
        const int width  = img.nx / patch_size;
        const uint32_t mode = GGML_SCALE_MODE_BILINEAR | GGML_SCALE_FLAG_ANTIALIAS;
        const int n_per_side = (int) std::sqrt(pos_embd->ne[1]);

        if (height == n_per_side && width == n_per_side) {
            return pos_embd;
        }

        pos_embd = ggml_reshape_3d(ctx0, pos_embd, n_embd, n_per_side, n_per_side);  // -> (n_embd, n_per_side, n_per_side)
        pos_embd = ggml_permute(ctx0, pos_embd, 2, 0, 1, 3);                         // -> (n_per_side, n_per_side, n_embd)
        pos_embd = ggml_interpolate(ctx0, pos_embd, width, height, n_embd, 1, mode); // -> (width, height, n_embd)
        pos_embd = ggml_permute(ctx0, pos_embd, 1, 2, 0, 3);                         // -> (n_embd, width, height)
        pos_embd = ggml_cont_2d(ctx0, pos_embd, n_embd, width * height);             // -> (n_embd, width * height)

        return pos_embd;
    }
    // build vision transformer (ViT) cgraph
    // this function should cover most of the models
    // if your model has specific features, you should probably duplicate this function
    ggml_tensor * build_vit(
            ggml_tensor * inp,
            int64_t n_pos,
            norm_type norm_t,
            ffn_op_type ffn_t,
            ggml_tensor * learned_pos_embd,
            std::function<ggml_tensor *(ggml_tensor *, const clip_layer &)> add_pos) {
        if (learned_pos_embd) {
            inp = ggml_add(ctx0, inp, learned_pos_embd);
            cb(inp, "pos_embed", -1);
        }

        ggml_tensor * inpL = inp;

        // pre-layernorm
        if (model.pre_ln_w) {
            inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
            cb(inpL, "pre_ln", -1);
        }

        // loop over layers
        for (int il = 0; il < n_layer; il++) {
            auto & layer = model.layers[il];
            ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

            // layernorm1
            cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
            cb(cur, "layer_inp_normed", il);

            // self-attention
            {
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }
                ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }
                ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                if (layer.q_norm) {
                    Qcur = build_norm(Qcur, layer.q_norm, NULL, norm_t, eps, il);
                    cb(Qcur, "Qcur_norm", il);
                }
                if (layer.k_norm) {
                    Kcur = build_norm(Kcur, layer.k_norm, NULL, norm_t, eps, il);
                    cb(Kcur, "Kcur_norm", il);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                if (add_pos) {
                    Qcur = add_pos(Qcur, layer);
                    Kcur = add_pos(Kcur, layer);
                    cb(Qcur, "Qcur_pos", il);
                    cb(Kcur, "Kcur_pos", il);
                }

                cur = build_attn(layer.o_w, layer.o_b,
                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
                cb(cur, "attn_out", il);
            }

            if (layer.ls_1_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_1_w);
                cb(cur, "attn_out_scaled", il);
            }

            // re-add the layer input, i.e. the residual
            cur = ggml_add(ctx0, cur, inpL);
            inpL = cur; // inpL = residual, cur = hidden_states
            cb(cur, "ffn_inp", il);

            // layernorm2
            cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
            cb(cur, "ffn_inp_normed", il);

            // ffn
            cur = build_ffn(cur,
                layer.ff_up_w, layer.ff_up_b,
                layer.ff_gate_w, layer.ff_gate_b,
                layer.ff_down_w, layer.ff_down_b,
                ffn_t, il);
            cb(cur, "ffn_out", il);

            if (layer.ls_2_w) {
                cur = ggml_mul(ctx0, cur, layer.ls_2_w);
                cb(cur, "ffn_out_scaled", il);
            }

            // residual 2
            cur = ggml_add(ctx0, inpL, cur);
            cb(cur, "layer_out", il);

            inpL = cur;
        }

        if (ctx->model.audio_has_avgpool()) {
            ggml_tensor * cur = inpL;
            cur = ggml_transpose(ctx0, cur);
            cur = ggml_cont(ctx0, cur);
            cur = ggml_pool_1d(ctx0, cur, GGML_OP_POOL_AVG, 2, 2, 0);
            cur = ggml_transpose(ctx0, cur);
            cur = ggml_cont(ctx0, cur);
            inpL = cur;
        }

        // post-layernorm
        if (model.post_ln_w) {
            inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
        }
        return inpL;
    }
    // build the input after conv2d (inp_raw --> patches)
    // returns tensor with shape [n_embd, n_patches]
    ggml_tensor * build_inp() {
        ggml_tensor * inp_raw = build_inp_raw();
        ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
        inp = ggml_reshape_2d(ctx0, inp, n_patches, n_embd);
        inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
        if (model.patch_bias) {
            inp = ggml_add(ctx0, inp, model.patch_bias);
            cb(inp, "patch_bias", -1);
        }
        return inp;
    }

    ggml_tensor * build_inp_raw(int channels = 3) {
        ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, channels);
        ggml_set_name(inp_raw, "inp_raw");
        ggml_set_input(inp_raw);
        return inp_raw;
    }

    ggml_tensor * build_norm(
            ggml_tensor * cur,
            ggml_tensor * mw,
            ggml_tensor * mb,
            norm_type type,
            float norm_eps,
            int il) const {
        cur = type == NORM_TYPE_RMS
            ? ggml_rms_norm(ctx0, cur, norm_eps)
            : ggml_norm(ctx0, cur, norm_eps);

        if (mw || mb) {
            cb(cur, "norm", il);
        }
        if (mw) {
            cur = ggml_mul(ctx0, cur, mw);
            if (mb) {
                cb(cur, "norm_w", il);
            }
        }
        if (mb) {
            cur = ggml_add(ctx0, cur, mb);
        }
        return cur;
    }
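    // generic FFN helper: computes down(act(gate(x)) * up(x)) when a gate weight
    // is present (the "_split" GLU variants below), otherwise down(act(up(x)));
    // biases are applied after each matmul when provided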
    ggml_tensor * build_ffn(
            ggml_tensor * cur,
            ggml_tensor * up,
            ggml_tensor * up_b,
            ggml_tensor * gate,
            ggml_tensor * gate_b,
            ggml_tensor * down,
            ggml_tensor * down_b,
            ffn_op_type type_op,
            int il) const {
        ggml_tensor * tmp = up ? ggml_mul_mat(ctx0, up, cur) : cur;
        cb(tmp, "ffn_up", il);

        if (up_b) {
            tmp = ggml_add(ctx0, tmp, up_b);
            cb(tmp, "ffn_up_b", il);
        }

        if (gate) {
            cur = ggml_mul_mat(ctx0, gate, cur);
            cb(cur, "ffn_gate", il);

            if (gate_b) {
                cur = ggml_add(ctx0, cur, gate_b);
                cb(cur, "ffn_gate_b", il);
            }
        } else {
            cur = tmp;
        }

        // we only support parallel ffn for now
        switch (type_op) {
            case FFN_SILU:
                if (gate) {
                    cur = ggml_swiglu_split(ctx0, cur, tmp);
                    cb(cur, "ffn_swiglu", il);
                } else {
                    cur = ggml_silu(ctx0, cur);
                    cb(cur, "ffn_silu", il);
                } break;
            case FFN_GELU:
                if (gate) {
                    cur = ggml_geglu_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu", il);
                } else {
                    cur = ggml_gelu(ctx0, cur);
                    cb(cur, "ffn_gelu", il);
                } break;
            case FFN_GELU_ERF:
                if (gate) {
                    cur = ggml_geglu_erf_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu_erf", il);
                } else {
                    cur = ggml_gelu_erf(ctx0, cur);
                    cb(cur, "ffn_gelu_erf", il);
                } break;
            case FFN_GELU_QUICK:
                if (gate) {
                    cur = ggml_geglu_quick_split(ctx0, cur, tmp);
                    cb(cur, "ffn_geglu_quick", il);
                } else {
                    cur = ggml_gelu_quick(ctx0, cur);
                    cb(cur, "ffn_gelu_quick", il);
                } break;
        }

        if (down) {
            cur = ggml_mul_mat(ctx0, down, cur);
        }
        if (down_b) {
            cb(cur, "ffn_down", il);
            cur = ggml_add(ctx0, cur, down_b);
        }

        return cur;
    }
    ggml_tensor * build_attn(
            ggml_tensor * wo,
            ggml_tensor * wo_b,
            ggml_tensor * q_cur,
            ggml_tensor * k_cur,
            ggml_tensor * v_cur,
            ggml_tensor * kq_mask,
            float kq_scale,
            int il) const {
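        // expects q_cur/k_cur/v_cur of shape [d_head, n_head, n_pos]; the result is
        // flattened back to [d_head*n_head, n_pos], then optionally projected through wo/wo_b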
        // these nodes are added to the graph together so that they are not reordered
        // by doing so, the number of splits in the graph is reduced
        ggml_build_forward_expand(gf, q_cur);
        ggml_build_forward_expand(gf, k_cur);
        ggml_build_forward_expand(gf, v_cur);

        ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
        //cb(q, "q", il);

        ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
        //cb(k, "k", il);

        ggml_tensor * cur;

        if (ctx->flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) {
            ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3);

            k = ggml_cast(ctx0, k, GGML_TYPE_F16);
            v = ggml_cast(ctx0, v, GGML_TYPE_F16);

            cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, 0.0f, 0.0f);
            ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);

            cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
        } else {
            ggml_tensor * v = ggml_permute(ctx0, v_cur, 1, 2, 0, 3);
            v = ggml_cont(ctx0, v);

            const auto n_tokens = q->ne[1];
            const auto n_head   = q->ne[2];

            ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
            // F32 precision may not be needed for vision encoders?
            // ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

            kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, 0.0f);

            ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
            cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
            cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
        }

        cb(cur, "kqv_out", il);

        if (wo) {
            cur = ggml_mul_mat(ctx0, wo, cur);
        }
        if (wo_b) {
            cur = ggml_add(ctx0, cur, wo_b);
        }

        return cur;
    }
    // implementation of the 2D RoPE without adding a new op in ggml
    // this is not efficient (it uses double the memory), but it works on all backends
    // TODO: there was a more efficient implementation which relied on ggml_view and ggml_rope_ext_inplace,
    //       but rope inplace does not work well with non-contiguous tensors; we should fix that
    //       and revert back to the original implementation in https://github.com/ggml-org/llama.cpp/pull/13065
    static ggml_tensor * build_rope_2d(
        ggml_context * ctx0,
        ggml_tensor * cur,
        ggml_tensor * pos_a, // first half
        ggml_tensor * pos_b, // second half
        const float freq_base,
        const bool interleave_freq
    ) {
        const int64_t n_dim  = cur->ne[0];
        const int64_t n_head = cur->ne[1];
        const int64_t n_pos  = cur->ne[2];

        // for example, if we have cur tensor of shape (n_dim=8, n_head, n_pos)
        // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
        // first half of cur will use 1e-0, 1e-2 (even)
        // second half of cur will use 1e-1, 1e-3 (odd)
        // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
        //  ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
        // then for the second half, we use freq_scale to shift the inv_freq
        //  ^ why? replace (2i) with (2i+1) in the above equation
        const float freq_scale_odd = interleave_freq
            ? std::pow(freq_base, (float)-2/n_dim)
            : 1.0;

        // first half
        ggml_tensor * first;
        {
            first = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                0);
            first = ggml_rope_ext(
                ctx0,
                first,
                pos_a,   // positions
                nullptr, // freq factors
                n_dim/2, // n_dims
                0, 0, freq_base,
                1.0f, 0.0f, 1.0f, 0.0f, 0.0f
            );
        }

        // second half
        ggml_tensor * second;
        {
            second = ggml_view_3d(ctx0, cur,
                n_dim/2, n_head, n_pos,
                ggml_row_size(cur->type, n_dim),
                ggml_row_size(cur->type, n_dim*n_head),
                n_dim/2 * ggml_element_size(cur));
            second = ggml_rope_ext(
                ctx0,
                second,
                pos_b,   // positions
                nullptr, // freq factors
                n_dim/2, // n_dims
                0, 0, freq_base,
                freq_scale_odd,
                0.0f, 1.0f, 0.0f, 0.0f
            );
        }

        cur = ggml_concat(ctx0, first, second, 0);
        return cur;
    }
    // aka pixel_shuffle / pixel_unshuffle / patch_merger (Kimi-VL)
    // supports dynamic resolution
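    // worked example (assuming scale_factor = 2 and a 5x7 patch grid): the grid is
    // zero-padded to 6x8, then the 48 positions are merged into 12 output rows of
    // n_embd*4 features, one row per 2x2 patch window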
    ggml_tensor * build_patch_merge_permute(ggml_tensor * cur, int scale_factor) {
        GGML_ASSERT(scale_factor > 1);

        const int n_embd = cur->ne[0];
        int width  = img.nx / patch_size;
        int height = img.ny / patch_size;

        // pad width and height to factor
        const int64_t pad_width  = CLIP_ALIGN(width,  scale_factor) - width;
        const int64_t pad_height = CLIP_ALIGN(height, scale_factor) - height;
        cur = ggml_reshape_3d(ctx0, cur, n_embd, width, height);
        if (pad_width || pad_height) {
            cur = ggml_pad(ctx0, cur, 0, pad_width, pad_height, 0);
            width  += pad_width;
            height += pad_height;
        }

        // unshuffle h
        cur = ggml_reshape_3d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height);
        cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

        // unshuffle w
        cur = ggml_cont_3d(ctx0, cur, n_embd * scale_factor * scale_factor, height / scale_factor, width / scale_factor);
        cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

        cur = ggml_cont_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
        cb(cur, "pixel_shuffle", -1);

        return cur;
    }
};
static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
    GGML_ASSERT(imgs.entries.size() == 1 && "n_batch > 1 is not supported");
    clip_graph graph(ctx, *imgs.entries[0]);

    ggml_cgraph * res;
    switch (ctx->proj_type()) {
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_JANUS_PRO:
            {
                res = graph.build_siglip();
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                res = graph.build_pixtral();
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            {
                res = graph.build_qwen2vl();
            } break;
        case PROJECTOR_TYPE_QWEN3VL:
            {
                res = graph.build_qwen3vl();
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                res = graph.build_minicpmv();
            } break;
        case PROJECTOR_TYPE_INTERNVL:
            {
                res = graph.build_internvl();
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                res = graph.build_llama4();
            } break;
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_QWEN2A:
            {
                res = graph.build_whisper_enc();
            } break;
        case PROJECTOR_TYPE_KIMIVL:
            {
                res = graph.build_kimivl();
            } break;
        case PROJECTOR_TYPE_COGVLM:
            {
                res = graph.build_cogvlm();
            } break;
        default:
            {
                res = graph.build_llava();
            } break;
    }
    return res;
}
struct clip_model_loader {
    ggml_context_ptr ctx_meta;
    gguf_context_ptr ctx_gguf;

    std::string fname;

    size_t model_size = 0; // in bytes

    bool has_vision = false;
    bool has_audio  = false;

    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_model
    clip_model_loader(const char * fname) : fname(fname) {
        struct ggml_context * meta = nullptr;

        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &meta,
        };

        ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
        if (!ctx_gguf.get()) {
            throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
        }

        ctx_meta.reset(meta);

        const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());

        // print gguf info
        {
            std::string name;
            get_string(KEY_NAME, name, false);
            std::string description;
            get_string(KEY_DESCRIPTION, description, false);
            LOG_INF("%s: model name:   %s\n",  __func__, name.c_str());
            LOG_INF("%s: description:  %s\n",  __func__, description.c_str());
            LOG_INF("%s: GGUF version: %d\n",  __func__, gguf_get_version(ctx_gguf.get()));
            LOG_INF("%s: alignment:    %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
            LOG_INF("%s: n_tensors:    %d\n",  __func__, n_tensors);
            LOG_INF("%s: n_kv:         %d\n",  __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
            LOG_INF("\n");
        }

        // modalities
        {
            get_bool(KEY_HAS_VISION_ENC, has_vision, false);
            get_bool(KEY_HAS_AUDIO_ENC,  has_audio,  false);

            if (has_vision) {
                LOG_INF("%s: has vision encoder\n", __func__);
            }
            if (has_audio) {
                LOG_INF("%s: has audio encoder\n", __func__);
            }
        }

        // tensors
        {
            for (int i = 0; i < n_tensors; ++i) {
                const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
                const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
                enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
                ggml_tensor * cur = ggml_get_tensor(meta, name);
                size_t tensor_size = ggml_nbytes(cur);
                model_size += tensor_size;
                LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
                    __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }
  2155. void load_hparams(clip_model & model, clip_modality modality) {
  2156. auto & hparams = model.hparams;
  2157. std::string log_ffn_op; // for logging
  2158. // sanity check
  2159. if (modality == CLIP_MODALITY_VISION) {
  2160. GGML_ASSERT(has_vision);
  2161. } else if (modality == CLIP_MODALITY_AUDIO) {
  2162. GGML_ASSERT(has_audio);
  2163. }
  2164. model.modality = modality;
  2165. // projector type
  2166. std::string proj_type;
  2167. {
  2168. // default key
  2169. get_string(KEY_PROJ_TYPE, proj_type, false);
  2170. // for models with mixed modalities
  2171. if (proj_type.empty()) {
  2172. if (modality == CLIP_MODALITY_VISION) {
  2173. get_string(KEY_VISION_PROJ_TYPE, proj_type, false);
  2174. } else if (modality == CLIP_MODALITY_AUDIO) {
  2175. get_string(KEY_AUDIO_PROJ_TYPE, proj_type, false);
  2176. } else {
  2177. GGML_ABORT("unknown modality");
  2178. }
  2179. }
  2180. model.proj_type = clip_projector_type_from_string(proj_type);
  2181. if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) {
  2182. throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
  2183. }
  2184. // correct arch for multimodal models (legacy method)
  2185. if (model.proj_type == PROJECTOR_TYPE_QWEN25O) {
  2186. model.proj_type = modality == CLIP_MODALITY_VISION
  2187. ? PROJECTOR_TYPE_QWEN25VL
  2188. : PROJECTOR_TYPE_QWEN2A;
  2189. }
  2190. }
  2191. const bool is_vision = model.modality == CLIP_MODALITY_VISION;
  2192. const bool is_audio = model.modality == CLIP_MODALITY_AUDIO;
  2193. // other hparams
  2194. {
  2195. const char * prefix = is_vision ? "vision" : "audio";
  2196. get_u32(string_format(KEY_N_EMBD, prefix), hparams.n_embd);
  2197. get_u32(string_format(KEY_N_HEAD, prefix), hparams.n_head);
  2198. get_u32(string_format(KEY_N_FF, prefix), hparams.n_ff);
  2199. get_u32(string_format(KEY_N_BLOCK, prefix), hparams.n_layer);
  2200. get_u32(string_format(KEY_PROJ_DIM, prefix), hparams.projection_dim);
  2201. get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps);
  2202. if (is_vision) {
  2203. get_u32(KEY_IMAGE_SIZE, hparams.image_size);
  2204. get_u32(KEY_PATCH_SIZE, hparams.patch_size);
  2205. get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
                get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy
                get_u32(KEY_MINICPMV_QUERY_NUM, hparams.minicpmv_query_num, false);
                if (hparams.minicpmv_query_num == 0) {
                    // fallback to hardcoded values for legacy models
                    // versions 3 to 6 all use 64 learned queries; other versions use 96
                    if (hparams.minicpmv_version >= 3 && hparams.minicpmv_version <= 6) {
                        hparams.minicpmv_query_num = 64;
                    } else {
                        hparams.minicpmv_query_num = 96;
                    }
                }
            } else if (is_audio) {
                get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins);
                // some hparams are unused, but they still need to be set to avoid issues
                hparams.image_size = 0;
                hparams.patch_size = 1;
            } else {
                GGML_ASSERT(false && "unknown modality");
            }

            // for pinpoints, we need to convert them into a list of resolution candidates
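            // e.g. a flattened pinpoint list [336, 672, 672, 336] (illustrative)
            // yields the two candidates {336x672, 672x336}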
            {
                std::vector<int> pinpoints;
                get_arr_int(KEY_IMAGE_GRID_PINPOINTS, pinpoints, false);
                if (!pinpoints.empty()) {
                    for (size_t i = 0; i < pinpoints.size(); i += 2) {
                        hparams.image_res_candidates.push_back({
                            pinpoints[i],
                            pinpoints[i+1],
                        });
                    }
                }
            }
            // default warmup value
            hparams.warmup_image_size = hparams.image_size;

            hparams.has_llava_projector = model.proj_type == PROJECTOR_TYPE_MLP
                                       || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                                       || model.proj_type == PROJECTOR_TYPE_LDP
                                       || model.proj_type == PROJECTOR_TYPE_LDPV2;

            {
                bool use_gelu = false;
                bool use_silu = false;
                get_bool(KEY_USE_GELU, use_gelu, false);
                get_bool(KEY_USE_SILU, use_silu, false);
                if (use_gelu && use_silu) {
                    throw std::runtime_error(string_format("%s: both use_gelu and use_silu are set to true\n", __func__));
                }
                if (use_gelu) {
                    hparams.ffn_op = FFN_GELU;
                    log_ffn_op = "gelu";
                } else if (use_silu) {
                    hparams.ffn_op = FFN_SILU;
                    log_ffn_op = "silu";
                } else {
                    hparams.ffn_op = FFN_GELU_QUICK;
                    log_ffn_op = "gelu_quick";
                }
            }

            {
                std::string mm_patch_merge_type;
                get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
                if (mm_patch_merge_type == "spatial_unpad") {
                    hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
                }
            }

            if (is_vision) {
                int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
                int idx_std  = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
                GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
                GGML_ASSERT(idx_std >= 0  && "image_std not found");
                const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
                const float * std_data  = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
                for (int i = 0; i < 3; ++i) {
                    hparams.image_mean[i] = mean_data[i];
                    hparams.image_std[i]  = std_data[i];
                }
            }

            // Load the vision feature layer indices if they are explicitly provided;
            // if multiple vision feature layers are present, the values will be concatenated
            // to form the final visual features.
            // NOTE: gguf conversions should standardize the values of the vision feature layer to
            // be non-negative, since we use -1 to mark values as unset here.
            std::vector<int> vision_feature_layer;
            get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
            // convert std::vector to std::unordered_set
            for (auto & layer : vision_feature_layer) {
                hparams.vision_feature_layer.insert(layer);
            }
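            // e.g. some conversions provide several intermediate layers such as
            // [3, 7, 15, 26] (illustrative); the graph builder concatenates the
            // output of each listed layer to form the final visual features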
            // model-specific params
            switch (model.proj_type) {
                case PROJECTOR_TYPE_MINICPMV:
                    {
                        if (hparams.minicpmv_version == 0) {
                            hparams.minicpmv_version = 2; // default to 2 if not set
                        }
                    } break;
                case PROJECTOR_TYPE_INTERNVL:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                    } break;
                case PROJECTOR_TYPE_IDEFICS3:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        get_u32(KEY_PREPROC_IMAGE_SIZE, hparams.image_longest_edge, false);
                    } break;
                case PROJECTOR_TYPE_LFM2:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        // ref: https://huggingface.co/LiquidAI/LFM2-VL-3B/blob/main/preprocessor_config.json
                        // the config above specifies the number of tokens after downsampling, while here it is
                        // the number before downsampling, so we relax the lower bound to 64
                        hparams.set_limit_image_tokens(64, 1024);
                    } break;
                case PROJECTOR_TYPE_PIXTRAL:
                case PROJECTOR_TYPE_LIGHTONOCR:
                    {
                        // ref: https://huggingface.co/mistral-community/pixtral-12b/blob/main/preprocessor_config.json
                        // TODO: verify the image_min_tokens
                        hparams.n_merge = 1; // the original pixtral does not use patch merging
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.n_merge, false);
                        hparams.set_limit_image_tokens(8, 1024);
                        hparams.set_warmup_n_tokens(256); // avoid OOM on warmup
                    } break;
                case PROJECTOR_TYPE_KIMIVL:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        // TODO: check kimivl preprocessor for exact values
                        hparams.set_limit_image_tokens(8, 1024);
                        hparams.set_warmup_n_tokens(256); // avoid OOM on warmup
                    } break;
                case PROJECTOR_TYPE_GEMMA3:
                    {
                        // default value (used by all model sizes in gemma 3 family)
                        // number of patches for each **side** is reduced by a factor of 4
                        hparams.n_merge = 4;
                        // test model (tinygemma3) has a different value, we optionally read it
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                    } break;
                case PROJECTOR_TYPE_QWEN2VL:
                case PROJECTOR_TYPE_QWEN25VL:
                case PROJECTOR_TYPE_QWEN3VL:
                    {
                        hparams.n_merge = 2; // default value for Qwen 2 and 2.5
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.n_merge, false);
                        get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern, model.proj_type == PROJECTOR_TYPE_QWEN25VL); // only 2.5 requires it
                        // ref: https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/blob/main/preprocessor_config.json
                        hparams.set_limit_image_tokens(8, 4096);
                        hparams.set_warmup_n_tokens(46*46); // avoid OOM on warmup
                        const int warn_min_pixels = 1024 * hparams.n_merge * hparams.n_merge * hparams.patch_size * hparams.patch_size;
                        if (hparams.image_min_pixels < warn_min_pixels) {
                            LOG_WRN("%s: Qwen-VL models require at minimum 1024 image tokens to function correctly on grounding tasks\n", __func__);
                            LOG_WRN("%s: if you encounter problems with accuracy, try adding --image-min-tokens 1024\n", __func__);
                            LOG_WRN("%s: more info: https://github.com/ggml-org/llama.cpp/issues/16842\n\n", __func__);
                        }
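                        // with the defaults n_merge = 2 and patch_size = 14, this
                        // threshold is 1024 * 2*2 * 14*14 = 802816 pixels, i.e.
                        // roughly an 896x896 image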
                    } break;
                case PROJECTOR_TYPE_LLAMA4:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        set_llava_uhd_res_candidates(model, 3);
                    } break;
                case PROJECTOR_TYPE_ULTRAVOX:
                case PROJECTOR_TYPE_QWEN2A:
                case PROJECTOR_TYPE_VOXTRAL:
                    {
                        bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX ||
                                             model.proj_type == PROJECTOR_TYPE_VOXTRAL;
                        get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
                        if (hparams.n_mel_bins != 128) {
                            throw std::runtime_error(string_format("%s: only 128 mel bins are supported for this audio model\n", __func__));
                        }
                        hparams.ffn_op = FFN_GELU_ERF;
                        log_ffn_op = "gelu_erf"; // temporary solution for logging
                    } break;
                default:
                    break;
            }

            // sanity check
            {
                if (hparams.image_max_pixels < hparams.image_min_pixels) {
                    throw std::runtime_error(string_format("%s: image_max_pixels (%d) is less than image_min_pixels (%d)\n", __func__, hparams.image_max_pixels, hparams.image_min_pixels));
                }
            }

            LOG_INF("%s: projector: %s\n", __func__, proj_type.c_str());
            LOG_INF("%s: n_embd: %d\n", __func__, hparams.n_embd);
            LOG_INF("%s: n_head: %d\n", __func__, hparams.n_head);
            LOG_INF("%s: n_ff: %d\n", __func__, hparams.n_ff);
            LOG_INF("%s: n_layer: %d\n", __func__, hparams.n_layer);
            LOG_INF("%s: ffn_op: %s\n", __func__, log_ffn_op.c_str());
            LOG_INF("%s: projection_dim: %d\n", __func__, hparams.projection_dim);

            if (is_vision) {
                LOG_INF("\n--- vision hparams ---\n");
                LOG_INF("%s: image_size: %d\n", __func__, hparams.image_size);
                LOG_INF("%s: patch_size: %d\n", __func__, hparams.patch_size);
                LOG_INF("%s: has_llava_proj: %d\n", __func__, hparams.has_llava_projector);
                LOG_INF("%s: minicpmv_version: %d\n", __func__, hparams.minicpmv_version);
                LOG_INF("%s: n_merge: %d\n", __func__, hparams.n_merge);
                LOG_INF("%s: n_wa_pattern: %d\n", __func__, hparams.n_wa_pattern);
                if (hparams.image_min_pixels > 0) {
                    LOG_INF("%s: image_min_pixels: %d%s\n", __func__, hparams.image_min_pixels, hparams.custom_image_min_tokens > 0 ? " (custom value)" : "");
                }
                if (hparams.image_max_pixels > 0) {
                    LOG_INF("%s: image_max_pixels: %d%s\n", __func__, hparams.image_max_pixels, hparams.custom_image_max_tokens > 0 ? " (custom value)" : "");
                }
            } else if (is_audio) {
                LOG_INF("\n--- audio hparams ---\n");
                LOG_INF("%s: n_mel_bins: %d\n", __func__, hparams.n_mel_bins);
                LOG_INF("%s: proj_stack_factor: %d\n", __func__, hparams.proj_stack_factor);
            }
            LOG_INF("\n");
            LOG_INF("%s: model size: %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size: %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
        }
    }

    void load_tensors(clip_ctx & ctx_clip) {
        auto & model = ctx_clip.model;
        auto & hparams = model.hparams;
        std::map<std::string, size_t> tensor_offset;
        std::vector<ggml_tensor *> tensors_to_load;

        // TODO @ngxson : support both audio and video in the future
        const char * prefix = model.modality == CLIP_MODALITY_AUDIO ? "a" : "v";

        // get offsets
        for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
            const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
            tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
        }

        // create data context
        struct ggml_init_params params = {
            /*.mem_size   =*/ static_cast<size_t>(gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ctx_clip.ctx_data.reset(ggml_init(params));
        if (!ctx_clip.ctx_data) {
            throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
        }

        // helper function
        auto get_tensor = [&](const std::string & name, bool required = true) {
            ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
            if (!cur && required) {
                throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
            }
            if (cur) {
                tensors_to_load.push_back(cur);
                // add tensors to context
                ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
                ggml_set_name(data_tensor, cur->name);
                cur = data_tensor;
            }
            return cur;
        };
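
        // note: get_tensor() only duplicates tensor *metadata* into ctx_data
        // (the context was created with no_alloc = true); the actual weights are
        // read from the file and uploaded to the backend buffer in the
        // "load data" step at the end of this function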
        model.class_embedding = get_tensor(TN_CLASS_EMBD, false);
        model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false);
        model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, prefix, "bias"), false);
        model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false);
        model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"), false);
        model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
        model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false);
        model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);
        model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false);

        // layers
        model.layers.resize(hparams.n_layer);
        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = model.layers[il];
            layer.k_w = get_tensor(string_format(TN_ATTN_K, prefix, il, "weight"), false);
            layer.q_w = get_tensor(string_format(TN_ATTN_Q, prefix, il, "weight"), false);
            layer.v_w = get_tensor(string_format(TN_ATTN_V, prefix, il, "weight"), false);
            layer.o_w = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "weight"));
            layer.qkv_w = get_tensor(string_format(TN_ATTN_QKV, prefix, il, "weight"), false);
            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, prefix, il, "weight"), false);
            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, prefix, il, "weight"), false);
            layer.ln_1_w = get_tensor(string_format(TN_LN_1, prefix, il, "weight"), false);
            layer.ln_2_w = get_tensor(string_format(TN_LN_2, prefix, il, "weight"), false);
            layer.ls_1_w = get_tensor(string_format(TN_LS_1, prefix, il, "weight"), false); // no bias
            layer.ls_2_w = get_tensor(string_format(TN_LS_2, prefix, il, "weight"), false); // no bias
            layer.k_b = get_tensor(string_format(TN_ATTN_K, prefix, il, "bias"), false);
            layer.q_b = get_tensor(string_format(TN_ATTN_Q, prefix, il, "bias"), false);
            layer.v_b = get_tensor(string_format(TN_ATTN_V, prefix, il, "bias"), false);
            layer.o_b = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "bias"), false);
            layer.qkv_b = get_tensor(string_format(TN_ATTN_QKV, prefix, il, "bias"), false);
            layer.ln_1_b = get_tensor(string_format(TN_LN_1, prefix, il, "bias"), false);
            layer.ln_2_b = get_tensor(string_format(TN_LN_2, prefix, il, "bias"), false);

            // ffn
            layer.ff_up_w = get_tensor(string_format(TN_FFN_UP, prefix, il, "weight"));
            layer.ff_up_b = get_tensor(string_format(TN_FFN_UP, prefix, il, "bias"), false);
            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, prefix, il, "weight"), false);
            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, prefix, il, "bias"), false);
            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "weight"));
            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "bias"), false);

            // qwen3vl deepstack layer
            layer.deepstack_norm_w = get_tensor(string_format(TN_DEEPSTACK_NORM, il, "weight"), false);
            layer.deepstack_norm_b = get_tensor(string_format(TN_DEEPSTACK_NORM, il, "bias"), false);
            layer.deepstack_fc1_w = get_tensor(string_format(TN_DEEPSTACK_FC1, il, "weight"), false);
            layer.deepstack_fc1_b = get_tensor(string_format(TN_DEEPSTACK_FC1, il, "bias"), false);
            layer.deepstack_fc2_w = get_tensor(string_format(TN_DEEPSTACK_FC2, il, "weight"), false);
            layer.deepstack_fc2_b = get_tensor(string_format(TN_DEEPSTACK_FC2, il, "bias"), false);
            if (layer.has_deepstack()) {
                model.n_deepstack_layers++;
            }
            // some models were exported with legacy (incorrect) naming, which is quite messy; we fix it here
            // note: a Qwen model converted from the old surgery script has n_ff = 0, so we cannot use n_ff to check!
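            // detection: in ggml, ne[0] of a matmul weight is the input dim, so a
            // "down" projection should have ne[0] == n_ff; if ff_down_w instead
            // consumes n_embd inputs, the up/down tensors were exported swapped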
            bool is_ffn_swapped = (
                    // only old models need this fix
                    model.proj_type == PROJECTOR_TYPE_MLP
                    || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                    || model.proj_type == PROJECTOR_TYPE_LDP
                    || model.proj_type == PROJECTOR_TYPE_LDPV2
                    || model.proj_type == PROJECTOR_TYPE_QWEN2VL
                    || model.proj_type == PROJECTOR_TYPE_QWEN25VL
                    || model.proj_type == PROJECTOR_TYPE_GLM_EDGE
                    || model.proj_type == PROJECTOR_TYPE_GEMMA3
                    || model.proj_type == PROJECTOR_TYPE_IDEFICS3
                    || model.proj_type == PROJECTOR_TYPE_MINICPMV
                ) && layer.ff_up_w && layer.ff_down_w && layer.ff_down_w->ne[0] == hparams.n_embd;
            if (is_ffn_swapped) {
                // swap up and down weights
                ggml_tensor * tmp = layer.ff_up_w;
                layer.ff_up_w = layer.ff_down_w;
                layer.ff_down_w = tmp;
                // swap up and down biases
                tmp = layer.ff_up_b;
                layer.ff_up_b = layer.ff_down_b;
                layer.ff_down_b = tmp;
                if (il == 0) {
                    LOG_WRN("%s: ffn up/down are swapped\n", __func__);
                }
            }
        }
        switch (model.proj_type) {
            case PROJECTOR_TYPE_MLP:
            case PROJECTOR_TYPE_MLP_NORM:
                {
                    // LLaVA projection
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
                    // Yi-type llava
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    // missing in Yi-type llava
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // Yi-type llava
                    model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
                    model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
                    model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
                    model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
                    if (model.mm_3_w) {
                        // TODO: this is a hack to support Yi-type llava
                        model.proj_type = PROJECTOR_TYPE_MLP_NORM;
                    }
                    model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
                } break;
            case PROJECTOR_TYPE_LDP:
                {
                    // MobileVLM projection
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                    model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
                    model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
                    model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
                    model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
                    model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
                    model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
                    model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
                    model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
                    model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
                    model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
                    model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
                    model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
                    model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
                    model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
                    model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
                    model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
                    model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
                    model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
                    model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
                    model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
                } break;
            case PROJECTOR_TYPE_LDPV2:
                {
                    // MobileVLM_V2 projection
                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                    model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
                    model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
                    model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
                } break;
            case PROJECTOR_TYPE_MINICPMV:
                {
                    // model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
                    model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
                    model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
                    model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
                    model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
                    model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
                    model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
                    model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
                    model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
                    model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
                    model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
                    model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
                    model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
                    model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
                    model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
                    model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
                    model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
                    model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
                    model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
                } break;
            case PROJECTOR_TYPE_GLM_EDGE:
                {
                    model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                    model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
                    model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
                    model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
                    model.mm_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
                    model.mm_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2VL:
            case PROJECTOR_TYPE_QWEN25VL:
            case PROJECTOR_TYPE_QWEN3VL:
                {
                    // the projector tensors are shared across these Qwen-VL generations
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_GEMMA3:
                {
                    model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
                    model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
                } break;
            case PROJECTOR_TYPE_IDEFICS3:
                {
                    model.projection = get_tensor(TN_MM_PROJECTOR);
                } break;
            case PROJECTOR_TYPE_LFM2:
            case PROJECTOR_TYPE_KIMIVL:
                {
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM);
                    model.mm_input_norm_b = get_tensor(TN_MM_INP_NORM_B);
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_PIXTRAL:
                {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // [IMG_BREAK] token embedding
                    model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
                    // for mistral small 3.1
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false);
                    model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
                } break;
            case PROJECTOR_TYPE_LIGHTONOCR:
                {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false);
                    model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
                } break;
            case PROJECTOR_TYPE_ULTRAVOX:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
                    model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2A:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight"));
                    model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias"));
                } break;
            case PROJECTOR_TYPE_VOXTRAL:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                } break;
            case PROJECTOR_TYPE_INTERNVL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                } break;
            case PROJECTOR_TYPE_LLAMA4:
                {
                    model.mm_model_proj = get_tensor(TN_MM_PROJECTOR);
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                } break;
            case PROJECTOR_TYPE_COGVLM:
                {
                    model.mm_model_proj = get_tensor(TN_MM_PROJECTOR);
                    model.mm_post_fc_norm_w = get_tensor(string_format(TN_MM_POST_FC_NORM, "weight"));
                    model.mm_post_fc_norm_b = get_tensor(string_format(TN_MM_POST_FC_NORM, "bias"));
                    model.mm_h_to_4h_w = get_tensor(string_format(TN_MM_H_TO_4H, "weight"));
                    model.mm_gate_w = get_tensor(string_format(TN_MM_GATE, "weight"));
                    model.mm_4h_to_h_w = get_tensor(string_format(TN_MM_4H_TO_H, "weight"));
                    model.mm_boi = get_tensor(TN_TOK_BOI);
                    model.mm_eoi = get_tensor(TN_TOK_EOI);
                } break;
            case PROJECTOR_TYPE_JANUS_PRO:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
                } break;
            default:
                GGML_ASSERT(false && "unknown projector type");
        }
        // load data
        {
            std::vector<uint8_t> read_buf;

            auto fin = std::ifstream(fname, std::ios::binary);
            if (!fin) {
                throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
            }

            // alloc memory and offload data
            ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
            ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
            ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
            for (auto & t : tensors_to_load) {
                ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
                const size_t offset = tensor_offset[t->name];
                fin.seekg(offset, std::ios::beg);
                if (!fin) {
                    throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
                }
                size_t num_bytes = ggml_nbytes(cur);
                if (ggml_backend_buft_is_host(buft)) {
                    // for the CPU and Metal backend, we can read directly into the tensor
                    fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
                } else {
                    // read into a temporary buffer first, then copy to device memory
                    read_buf.resize(num_bytes);
                    fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
                    ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
                }
            }
            fin.close();

            LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
        }
    }

    struct support_info_op {
        ggml_tensor * op;
        // true if the op runs on the accelerated ctx_clip.backend
        bool is_accel = true;
    };

    struct support_info_graph {
        // whether the clip_ctx.backend supports flash attention
        bool fattn = true;
        ggml_tensor * fattn_op = nullptr; // for debugging
        std::vector<support_info_op> ops;
    };

    static void warmup(clip_ctx & ctx_clip) {
        // create a fake batch
        const auto & hparams = ctx_clip.model.hparams;
        clip_image_f32_batch batch;
        clip_image_f32_ptr img(clip_image_f32_init());
        if (ctx_clip.model.modality == CLIP_MODALITY_VISION) {
            img->nx = hparams.warmup_image_size;
            img->ny = hparams.warmup_image_size;
            LOG_INF("%s: warmup with image size = %d x %d\n", __func__, img->nx, img->ny);
        } else {
            img->nx = hparams.warmup_audio_size;
            img->ny = hparams.n_mel_bins;
            LOG_INF("%s: warmup with audio size = %d\n", __func__, img->nx);
        }
        batch.entries.push_back(std::move(img));
        warmup(ctx_clip, batch);
    }
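
    // allocate compute buffers for the given batch; when the flash-attention
    // setting is AUTO, probe the backend once with flash attention enabled and
    // fall back to the regular attention graph if the fattn op is unsupported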
    static void warmup(clip_ctx & ctx_clip, const clip_image_f32_batch & batch) {
        support_info_graph info;
        if (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_AUTO) {
            // try to enable flash attention to see if it's supported
            ctx_clip.flash_attn_type = CLIP_FLASH_ATTN_TYPE_ENABLED;
            info = alloc_compute_meta(ctx_clip, batch);
            if (!info.fattn && info.fattn_op) {
                auto op = info.fattn_op;
                LOG_WRN("%s: *****************************************************************\n", __func__);
                LOG_WRN("%s: WARNING: flash attention not supported by %s, memory usage will increase\n", __func__, ggml_backend_name(ctx_clip.backend));
                LOG_WRN("%s: op params:\n", __func__);
                static auto print_shape = [](const char * fn, const char * name, ggml_tensor * t) {
                    LOG_WRN("%s: %s: type = %s, ne = [%" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 "], nb = [%zu %zu %zu %zu]\n", fn,
                        name, ggml_type_name(t->type),
                        t->ne[0], t->ne[1], t->ne[2], t->ne[3],
                        t->nb[0], t->nb[1], t->nb[2], t->nb[3]);
                };
                print_shape(__func__, " dst", op);
                print_shape(__func__, "src0", op->src[0]);
                print_shape(__func__, "src1", op->src[1]);
                print_shape(__func__, "src2", op->src[2]);
                LOG_WRN("%s: please report this on github as an issue\n", __func__);
                LOG_WRN("%s: *****************************************************************\n", __func__);
                ctx_clip.flash_attn_type = CLIP_FLASH_ATTN_TYPE_DISABLED;
                alloc_compute_meta(ctx_clip, batch);
            }
        } else {
            info = alloc_compute_meta(ctx_clip, batch);
            if (!info.fattn && ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) {
                LOG_WRN("%s: flash attention is not supported by the current backend; falling back to CPU (performance will be degraded)\n", __func__);
            }
        }

        ctx_clip.is_allocated = true; // mark buffers as allocated

        LOG_INF("%s: flash attention is %s\n", __func__,
            (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) ? "enabled" : "disabled");

        // print ops that are not supported by the GPU backend (if there is one)
        if (ctx_clip.backend && ctx_clip.backend != ctx_clip.backend_cpu) {
            std::vector<support_info_op> unsupported_ops;
            for (const auto & op : info.ops) {
                if (!op.is_accel) {
                    unsupported_ops.push_back(op);
                }
            }
            if (!unsupported_ops.empty()) {
                LOG_WRN("%s: *****************************************************************\n", __func__);
                LOG_WRN("%s: WARNING: the CLIP graph uses operators that are not supported by the backend\n", __func__);
                LOG_WRN("%s: performance will be suboptimal\n", __func__);
                LOG_WRN("%s: list of unsupported ops (backend=%s):\n", __func__, ggml_backend_name(ctx_clip.backend));
                for (const auto & op : unsupported_ops) {
                    LOG_WRN("%s: %16s: type = %s, ne = [%" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 "]\n", __func__,
                        ggml_op_name(op.op->op),
                        ggml_type_name(op.op->type),
                        op.op->ne[0], op.op->ne[1], op.op->ne[2], op.op->ne[3]);
                }
                LOG_WRN("%s: flash attention is %s\n", __func__,
                    (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) ? "enabled" : "disabled");
                LOG_WRN("%s: please report this on github as an issue\n", __func__);
                LOG_WRN("%s: ref: https://github.com/ggml-org/llama.cpp/pull/16837#issuecomment-3461676118\n", __func__);
                LOG_WRN("%s: *****************************************************************\n", __func__);
            }
        }
    }
    static support_info_graph alloc_compute_meta(clip_ctx & ctx_clip, const clip_image_f32_batch & batch) {
        ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());

        ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
        ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);

        for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
            ggml_backend_t backend = ctx_clip.backend_ptrs[i];
            ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
            size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
            if (size > 1) {
                LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                    ggml_backend_buft_name(buft),
                    size / 1024.0 / 1024.0);
            }
        }

        const int n_splits = ggml_backend_sched_get_n_splits(ctx_clip.sched.get());
        const int n_nodes = ggml_graph_n_nodes(gf);

        LOG_INF("%s: graph splits = %d, nodes = %d\n", __func__, n_splits, n_nodes);

        support_info_graph res {
            /*.fattn    = */ true,
            /*.fattn_op = */ nullptr,
            /*.ops      = */ {},
        };

        // check op support
        for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
            ggml_tensor * node = ggml_graph_node(gf, i);
            res.ops.push_back({node, true});
            if (!ggml_backend_supports_op(ctx_clip.backend, node)) {
                res.ops.back().is_accel = false;
                if (node->op == GGML_OP_FLASH_ATTN_EXT) {
                    res.fattn = false;
                    res.fattn_op = node;
                }
            }
        }
        return res;
    }
    void get_bool(const std::string & key, bool & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_bool(ctx_gguf.get(), i);
    }

    void get_i32(const std::string & key, int & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_i32(ctx_gguf.get(), i);
    }

    void get_u32(const std::string & key, int & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_u32(ctx_gguf.get(), i);
    }

    void get_f32(const std::string & key, float & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_f32(ctx_gguf.get(), i);
    }

    void get_string(const std::string & key, std::string & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
    }
    void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        int n = gguf_get_arr_n(ctx_gguf.get(), i);
        output.resize(n);
        const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
        for (int j = 0; j < n; ++j) {
            output[j] = values[j];
        }
    }
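
    // build the LLaVA-UHD resolution candidate list as every (x, y) grid of the
    // base image_size up to max_patches_per_side per side, skipping the 1x1 grid;
    // e.g. image_size = 336 and max_patches_per_side = 3 (illustrative) yield:
    //   {336x672, 336x1008, 672x336, 672x672, 672x1008, 1008x336, 1008x672, 1008x1008}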
    static void set_llava_uhd_res_candidates(clip_model & model, const int max_patches_per_side) {
        auto & hparams = model.hparams;
        for (int x = 1; x <= max_patches_per_side; x++) {
            for (int y = 1; y <= max_patches_per_side; y++) {
                if (x == 1 && y == 1) {
                    continue; // skip the first point
                }
                hparams.image_res_candidates.push_back(clip_image_size{
                    x*hparams.image_size,
                    y*hparams.image_size,
                });
            }
        }
    }
};
struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) {
    clip_ctx * ctx_vision = nullptr;
    clip_ctx * ctx_audio = nullptr;

    try {
        clip_model_loader loader(fname);

        if (loader.has_vision) {
            ctx_vision = new clip_ctx(ctx_params);
            loader.load_hparams(ctx_vision->model, CLIP_MODALITY_VISION);
            loader.load_tensors(*ctx_vision);
            if (ctx_params.warmup) {
                loader.warmup(*ctx_vision);
            }
        }

        if (loader.has_audio) {
            ctx_audio = new clip_ctx(ctx_params);
            loader.load_hparams(ctx_audio->model, CLIP_MODALITY_AUDIO);
            loader.load_tensors(*ctx_audio);
            if (ctx_params.warmup) {
                loader.warmup(*ctx_audio);
            }
        }
    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
        delete ctx_vision;
        delete ctx_audio;
        return {nullptr, nullptr};
    }

    return {ctx_vision, ctx_audio};
}
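
// usage sketch (illustrative; the exact clip_context_params fields are declared
// in clip.h, and `warmup` gates the buffer pre-allocation above):
//   clip_context_params params{};
//   clip_init_result res = clip_init("mmproj-model.gguf", params);
//   // either returned context may be nullptr if the GGUF lacks that modality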
struct clip_image_size * clip_image_size_init() {
    struct clip_image_size * load_image_size = new struct clip_image_size();
    load_image_size->width = 448;
    load_image_size->height = 448;
    return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
    return new clip_image_u8();
}

struct clip_image_f32 * clip_image_f32_init() {
    return new clip_image_f32();
}

struct clip_image_f32_batch * clip_image_f32_batch_init() {
    return new clip_image_f32_batch();
}

unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
    if (nx) *nx = img->nx;
    if (ny) *ny = img->ny;
    return img->buf.data();
}

void clip_image_size_free(struct clip_image_size * load_image_size) {
    if (load_image_size == nullptr) {
        return;
    }
    delete load_image_size;
}
void clip_image_u8_free(struct clip_image_u8 * img) { delete img; }
void clip_image_f32_free(struct clip_image_f32 * img) { delete img; }
void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { delete batch; }
void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { delete batch; }

size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
    return batch->entries.size();
}

size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->nx;
}

size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->ny;
}

clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return nullptr;
    }
    return batch->entries[idx].get();
}

void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
    img->nx = nx;
    img->ny = ny;
    img->buf.resize(3 * nx * ny);
    memcpy(img->buf.data(), rgb_pixels, img->buf.size());
}

// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(src.buf.size());

    // TODO @ngxson : seems like this could be done more efficiently on cgraph
    for (size_t i = 0; i < src.buf.size(); ++i) {
        int c = i % 3; // rgb
        dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
    }
}
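
// e.g. with the OpenAI CLIP statistics (mean ~0.481, std ~0.269 for the red
// channel), a u8 value of 255 maps to (255/255 - 0.481) / 0.269 ~ 1.93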
// set of tools to manipulate images
// in the future, we can have HW acceleration by allowing this struct to access 3rd party libs like imagick or opencv
struct img_tool {
    enum resize_algo {
        RESIZE_ALGO_BILINEAR,
        RESIZE_ALGO_BICUBIC,
        // RESIZE_ALGO_LANCZOS, // TODO
    };

    static void resize(
            const clip_image_u8 & src,
            clip_image_u8 & dst,
            const clip_image_size & target_resolution,
            resize_algo algo,
            bool add_padding = true, // TODO: define the behavior for add_padding = false
            std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
        dst.nx = target_resolution.width;
        dst.ny = target_resolution.height;
        dst.buf.resize(3 * dst.nx * dst.ny);

        if (dst.nx == src.nx && dst.ny == src.ny) {
            // no resize needed, simple copy
            dst.buf = src.buf;
            return;
        }

        if (!add_padding) {
            // direct resize
            switch (algo) {
                case RESIZE_ALGO_BILINEAR:
                    resize_bilinear(src, dst, target_resolution.width, target_resolution.height);
                    break;
                case RESIZE_ALGO_BICUBIC:
                    resize_bicubic(src, dst, target_resolution.width, target_resolution.height);
                    break;
                default:
                    throw std::runtime_error("Unsupported resize algorithm");
            }
        } else {
            // resize with padding
            clip_image_u8 resized_image;
            float scale_w = static_cast<float>(target_resolution.width)  / src.nx;
            float scale_h = static_cast<float>(target_resolution.height) / src.ny;
            float scale = std::min(scale_w, scale_h);
            int new_width  = std::min(static_cast<int>(std::ceil(src.nx * scale)), target_resolution.width);
            int new_height = std::min(static_cast<int>(std::ceil(src.ny * scale)), target_resolution.height);

            switch (algo) {
                case RESIZE_ALGO_BILINEAR:
                    resize_bilinear(src, resized_image, new_width, new_height);
                    break;
                case RESIZE_ALGO_BICUBIC:
                    resize_bicubic(src, resized_image, new_width, new_height);
                    break;
                default:
                    throw std::runtime_error("Unsupported resize algorithm");
            }

            // fill dst with pad_color
            fill(dst, pad_color);

            int offset_x = (target_resolution.width  - new_width)  / 2;
            int offset_y = (target_resolution.height - new_height) / 2;

            composite(dst, resized_image, offset_x, offset_y);
        }
    }

    static void crop(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
        dst.nx = w;
        dst.ny = h;
        dst.buf.resize(3 * w * h);

        for (int i = 0; i < h; ++i) {
            for (int j = 0; j < w; ++j) {
                int src_idx = 3 * ((y + i)*image.nx + (x + j));
                int dst_idx = 3 * (i*w + j);
                dst.buf[dst_idx]     = image.buf[src_idx];
                dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
            }
        }
    }

    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size will be aligned to the nearest multiple of align_size
    // if the H or W size is larger than longest_edge, it will be resized to longest_edge
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int longest_edge) {
        GGML_ASSERT(align_size > 0);
        if (inp_size.width <= 0 || inp_size.height <= 0 || longest_edge <= 0) {
            return {0, 0};
        }

        float scale = std::min(static_cast<float>(longest_edge) / inp_size.width,
                               static_cast<float>(longest_edge) / inp_size.height);

        float target_width_f  = static_cast<float>(inp_size.width)  * scale;
        float target_height_f = static_cast<float>(inp_size.height) * scale;

        auto ceil_by_factor = [f = align_size](float x) { return static_cast<int>(std::ceil(x / static_cast<float>(f))) * f; };
        int aligned_width  = ceil_by_factor(target_width_f);
        int aligned_height = ceil_by_factor(target_height_f);

        return {aligned_width, aligned_height};
    }
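
    // e.g. a 2000x1000 input with align_size = 14 and longest_edge = 1024
    // (illustrative): scale = min(1024/2000, 1024/1000) = 0.512 -> 1024x512,
    // then ceiling both sides to a multiple of 14 gives 1036x518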
    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size will have min_pixels <= W*H <= max_pixels
    // this is referred to as "smart_resize" in the transformers code
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int min_pixels, const int max_pixels) {
        GGML_ASSERT(align_size > 0);
        const int width  = inp_size.width;
        const int height = inp_size.height;

        auto round_by_factor = [f = align_size](float x) { return static_cast<int>(std::round(x / static_cast<float>(f))) * f; };
        auto ceil_by_factor  = [f = align_size](float x) { return static_cast<int>(std::ceil(x / static_cast<float>(f))) * f; };
        auto floor_by_factor = [f = align_size](float x) { return static_cast<int>(std::floor(x / static_cast<float>(f))) * f; };

        // always align up first
        int h_bar = std::max(align_size, round_by_factor(height));
        int w_bar = std::max(align_size, round_by_factor(width));

        if (h_bar * w_bar > max_pixels) {
            const auto beta = std::sqrt(static_cast<float>(height * width) / max_pixels);
            h_bar = std::max(align_size, floor_by_factor(height / beta));
            w_bar = std::max(align_size, floor_by_factor(width / beta));
        } else if (h_bar * w_bar < min_pixels) {
            const auto beta = std::sqrt(static_cast<float>(min_pixels) / (height * width));
            h_bar = ceil_by_factor(height * beta);
            w_bar = ceil_by_factor(width * beta);
        }

        return {w_bar, h_bar};
    }
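
    // e.g. a 4000x3000 input with align_size = 28 and max_pixels = 802816
    // (illustrative): the rounded size 4004x2996 exceeds the budget, so both
    // sides are divided by beta = sqrt(4000*3000 / 802816) ~ 3.87 and floored
    // to multiples of 28, giving 1008x756 (~762k pixels, within the budget)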
    // draw src image into dst image at offset (offset_x, offset_y)
    static void composite(clip_image_u8 & dst, const clip_image_u8 & src, int offset_x, int offset_y) {
        for (int y = 0; y < src.ny; ++y) {
            for (int x = 0; x < src.nx; ++x) {
                int dx = x + offset_x;
                int dy = y + offset_y;
                // skip pixels that would be out of bounds in the destination
                if (dx < 0 || dy < 0 || dx >= dst.nx || dy >= dst.ny) {
                    continue;
                }
                size_t dst_idx = 3 * (static_cast<size_t>(dy) * dst.nx + static_cast<size_t>(dx));
                size_t src_idx = 3 * (static_cast<size_t>(y) * src.nx + static_cast<size_t>(x));
                dst.buf[dst_idx + 0] = src.buf[src_idx + 0];
                dst.buf[dst_idx + 1] = src.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = src.buf[src_idx + 2];
            }
        }
    }

    // fill the image with a solid color
    static void fill(clip_image_u8 & img, const std::array<uint8_t, 3> & color) {
        for (size_t i = 0; i < img.buf.size(); i += 3) {
            img.buf[i]     = color[0];
            img.buf[i + 1] = color[1];
            img.buf[i + 2] = color[2];
        }
    }

private:
    // Bilinear resize function
    static void resize_bilinear(const clip_image_u8 & src, clip_image_u8 & dst, int target_width, int target_height) {
        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float x_ratio = static_cast<float>(src.nx - 1) / target_width;
        float y_ratio = static_cast<float>(src.ny - 1) / target_height;

        for (int y = 0; y < target_height; y++) {
            for (int x = 0; x < target_width; x++) {
                float px = x_ratio * x;
                float py = y_ratio * y;
                int x_floor = static_cast<int>(px);
                int y_floor = static_cast<int>(py);
                float x_lerp = px - x_floor;
                float y_lerp = py - y_floor;

                for (int c = 0; c < 3; c++) {
                    float top = lerp(
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    float bottom = lerp(
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
                }
            }
        }
    }
    // Bicubic resize function
    // part of the image will be cropped if the aspect ratio is different
    static bool resize_bicubic(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
        const int nx = img.nx;
        const int ny = img.ny;

        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float Cc;
        float C[5] = {};
        float d0, d2, d3, a0, a1, a2, a3;
        int i, j, k, jj;
        int x, y;
        float dx, dy;
        float tx, ty;

        tx = (float)nx / (float)target_width;
        ty = (float)ny / (float)target_height;

        // Bicubic interpolation; adapted from ViT.cpp, inspired from :
        //  -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
        //  -> https://en.wikipedia.org/wiki/Bicubic_interpolation
        for (i = 0; i < target_height; i++) {
            for (j = 0; j < target_width; j++) {
                x = (int)(tx * j);
                y = (int)(ty * i);

                dx = tx * j - x;
                dy = ty * i - y;

                for (k = 0; k < 3; k++) {
                    // horizontal pass: cubic interpolation along x for each of the 4 neighboring rows
                    for (jj = 0; jj <= 3; jj++) {
                        d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];

                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                        C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
                    }

                    // vertical pass: cubic interpolation along y over the 4 row results
                    // (hoisted out of the jj loop; the original recomputed it redundantly on every iteration)
                    d0 = C[0] - C[1];
                    d2 = C[2] - C[1];
                    d3 = C[3] - C[1];
                    a0 = C[1];

                    a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                    a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                    a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                    Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;

                    const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
                    dst.buf[(i * target_width + j) * 3 + k] = Cc2;
                }
            }
        }

        return true;
    }
    static inline int clip(int x, int lower, int upper) {
        return std::max(lower, std::min(x, upper));
    }

    // Linear interpolation between two points
    static inline float lerp(float s, float e, float t) {
        return s + (e - s) * t;
    }
};
/**
 * implementation of LLaVA-UHD:
 *  - https://arxiv.org/pdf/2403.11703
 *  - https://github.com/thunlp/LLaVA-UHD
 *  - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
 *
 * overview:
 *  - an image always has a single overview (downscaled image)
 *  - an image can have 0 or multiple slices, depending on the image size
 *  - each slice can then be considered as a separate image
 *
 * for example:
 *
 * [overview] --> [slice 1] --> [slice 2]
 *                    |             |
 *                    +--> [slice 3] --> [slice 4]
 */
struct llava_uhd {
    struct slice_coordinates {
        int x;
        int y;
        clip_image_size size;
    };

    struct slice_instructions {
        clip_image_size overview_size; // size of downscaled image
        clip_image_size refined_size;  // size of image right before slicing (must be multiple of slice size)
        clip_image_size grid_size;     // grid_size.width * grid_size.height = number of slices
        std::vector<slice_coordinates> slices;
        bool padding_refined = false;  // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
    };
    static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
        slice_instructions res;
        const int patch_size      = clip_get_patch_size(ctx);
        const int slice_size      = clip_get_image_size(ctx);
        const int original_width  = original_size.width;
        const int original_height = original_size.height;

        const bool has_slices    = original_size.width > slice_size || original_size.height > slice_size;
        const bool has_pinpoints = !ctx->model.hparams.image_res_candidates.empty();

        if (!has_slices) {
            // skip slicing logic
            res.overview_size = clip_image_size{slice_size, slice_size};
            res.refined_size  = clip_image_size{0, 0};
            res.grid_size     = clip_image_size{0, 0};
            return res;
        }

        if (has_pinpoints) {
            // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
            auto refine_size = llava_uhd::select_best_resolution(
                original_size,
                ctx->model.hparams.image_res_candidates);
            res.overview_size   = clip_image_size{slice_size, slice_size};
            res.refined_size    = refine_size;
            res.grid_size       = clip_image_size{0, 0};
            res.padding_refined = true;

            LOG_DBG("%s: using pinpoints for slicing\n", __func__);
            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n",
                    __func__, original_width, original_height,
                    res.overview_size.width, res.overview_size.height,
                    res.refined_size.width, res.refined_size.height);

            for (int y = 0; y < refine_size.height; y += slice_size) {
                for (int x = 0; x < refine_size.width; x += slice_size) {
                    slice_coordinates slice;
                    slice.x = x;
                    slice.y = y;
                    slice.size.width  = std::min(slice_size, refine_size.width  - x);
                    slice.size.height = std::min(slice_size, refine_size.height - y);
                    res.slices.push_back(slice);
                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
                            __func__, (int)res.slices.size() - 1,
                            slice.x, slice.y, slice.size.width, slice.size.height);
                }
            }

            res.grid_size.height = refine_size.height / slice_size;
            res.grid_size.width  = refine_size.width  / slice_size;
            LOG_DBG("%s: grid size: %d x %d\n", __func__, res.grid_size.width, res.grid_size.height);

            return res;
        }

        // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)
        auto best_size    = get_best_resize(original_size, slice_size, patch_size, !has_slices);
        res.overview_size = best_size;

        {
            const int   max_slice_nums = 9; // TODO: this is only used by minicpmv, maybe remove it
            const float log_ratio      = log((float)original_width / original_height);
            const float ratio          = (float)original_width * original_height / (slice_size * slice_size);
            const int   multiple       = fmin(ceil(ratio), max_slice_nums);

            auto best_grid   = get_best_grid(max_slice_nums, multiple, log_ratio);
            auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
            res.grid_size    = best_grid;
            res.refined_size = refine_size;

            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d, grid size: %d x %d\n",
                    __func__, original_width, original_height,
                    res.overview_size.width, res.overview_size.height,
                    res.refined_size.width, res.refined_size.height,
                    res.grid_size.width, res.grid_size.height);

            int width  = refine_size.width;
            int height = refine_size.height;
            int grid_x = int(width  / best_grid.width);
            int grid_y = int(height / best_grid.height);

            for (int patches_y = 0, ic = 0;
                 patches_y < refine_size.height && ic < best_grid.height;
                 patches_y += grid_y, ic += 1) {
                for (int patches_x = 0, jc = 0;
                     patches_x < refine_size.width && jc < best_grid.width;
                     patches_x += grid_x, jc += 1) {
                    slice_coordinates slice;
                    slice.x = patches_x;
                    slice.y = patches_y;
                    slice.size.width  = grid_x;
                    slice.size.height = grid_y;
                    res.slices.push_back(slice);
                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
                            __func__, (int)res.slices.size() - 1,
                            slice.x, slice.y, slice.size.width, slice.size.height);
                }
            }
        }

        return res;
    }
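
    // Worked example for the dynamic (no-pinpoints) path, with illustrative
    // numbers rather than any specific model config: an 800x1200 input and
    // slice_size = 448 give ratio = 800*1200 / (448*448) ~= 4.78, so
    // multiple = 5 and the candidate slice counts are {4, 5, 6}. Among their
    // factor pairs, the grid 2x3 minimizes |log(800/1200) - log(2/3)| (the
    // error is ~0), so the image is refined and cut into 2 columns x 3 rows.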
    static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
        std::vector<clip_image_u8_ptr> output;

        img_tool::resize_algo interpolation = img_tool::RESIZE_ALGO_BILINEAR; // TODO: make it configurable

        // resize to overview size
        clip_image_u8_ptr resized_img(clip_image_u8_init());
        img_tool::resize(*img, *resized_img, inst.overview_size, interpolation);
        output.push_back(std::move(resized_img));
        if (inst.slices.empty()) {
            // no slices, just return the resized image
            return output;
        }

        // resize to refined size
        clip_image_u8_ptr refined_img(clip_image_u8_init());
        if (inst.padding_refined) {
            img_tool::resize(*img, *refined_img, inst.refined_size, interpolation);
        } else {
            // only the bicubic algo preserves the aspect ratio; old models rely on this behavior
            // TODO: do we need to support other algos here?
            img_tool::resize(*img, *refined_img, inst.refined_size, img_tool::RESIZE_ALGO_BICUBIC, false);
        }

        // create slices
        for (const auto & slice : inst.slices) {
            int x = slice.x;
            int y = slice.y;
            int w = slice.size.width;
            int h = slice.size.height;

            clip_image_u8_ptr img_slice(clip_image_u8_init());
            img_tool::crop(*refined_img, *img_slice, x, y, w, h);
            output.push_back(std::move(img_slice));
        }

        return output;
    }
private:
    static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
            float r = static_cast<float>(width) / height;
            height  = static_cast<int>(scale_resolution / std::sqrt(r));
            width   = static_cast<int>(height * r);
        }
        clip_image_size res;
        res.width  = ensure_divide(width,  patch_size);
        res.height = ensure_divide(height, patch_size);
        return res;
    }

    static clip_image_size resize_maintain_aspect_ratio(const clip_image_size & orig, const clip_image_size & target_max) {
        float scale_width  = static_cast<float>(target_max.width)  / orig.width;
        float scale_height = static_cast<float>(target_max.height) / orig.height;
        float scale = std::min(scale_width, scale_height);
        return clip_image_size{
            static_cast<int>(orig.width  * scale),
            static_cast<int>(orig.height * scale),
        };
    }
    /**
     * Selects the best resolution from a list of possible resolutions based on the original size.
     *
     * For example, when given a list of resolutions:
     *  - 100x100
     *  - 200x100
     *  - 100x200
     *  - 200x200
     *
     * And an input image of size 111x200, then 100x200 is the best fit (least wasted resolution).
     *
     * @param original_size The original size of the image
     * @param possible_resolutions A list of possible resolutions
     * @return The best fit resolution
     */
    static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
        clip_image_size best_fit;
        int min_wasted_area          = std::numeric_limits<int>::max();
        int max_effective_resolution = 0;

        for (const clip_image_size & candidate : possible_resolutions) {
            auto target_size = resize_maintain_aspect_ratio(original_size, candidate);
            int  effective_resolution = std::min(
                target_size.width * target_size.height,
                original_size.width * original_size.height);
            int  wasted_area = (candidate.width * candidate.height) - effective_resolution;

            if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_area < min_wasted_area)) {
                max_effective_resolution = effective_resolution;
                min_wasted_area          = wasted_area;
                best_fit                 = candidate;
            }

            LOG_DBG("%s: candidate: %d x %d, target: %d x %d, wasted: %d, effective: %d\n", __func__, candidate.width, candidate.height, target_size.width, target_size.height, wasted_area, effective_resolution);
        }

        return best_fit;
    }
    static int ensure_divide(int length, int patch_size) {
        return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
    }

    static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        int grid_x = grid.width;
        int grid_y = grid.height;

        int refine_width  = ensure_divide(width,  grid_x);
        int refine_height = ensure_divide(height, grid_y);

        clip_image_size grid_size;
        grid_size.width  = refine_width  / grid_x;
        grid_size.height = refine_height / grid_y;

        auto best_grid_size   = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
        int  best_grid_width  = best_grid_size.width;
        int  best_grid_height = best_grid_size.height;

        clip_image_size refine_size;
        refine_size.width  = best_grid_width  * grid_x;
        refine_size.height = best_grid_height * grid_y;
        return refine_size;
    }
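
    // Continuing the 800x1200 example above (still illustrative numbers): with
    // grid 2x3, scale_resolution = 448 and patch_size = 14, each grid cell is
    // 800/2 x 1200/3 = 400x400; get_best_resize upscales that cell to 448x448
    // (already a multiple of 14), so the refined image is 896x1344 before being
    // cut into six 448x448 slices.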
    static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
        std::vector<int> candidate_split_grids_nums;
        for (int i : {multiple - 1, multiple, multiple + 1}) {
            if (i == 1 || i > max_slice_nums) {
                continue;
            }
            candidate_split_grids_nums.push_back(i);
        }

        std::vector<clip_image_size> candidate_grids;
        for (int split_grids_nums : candidate_split_grids_nums) {
            int m = 1;
            while (m <= split_grids_nums) {
                if (split_grids_nums % m == 0) {
                    candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
                }
                ++m;
            }
        }

        clip_image_size best_grid{1, 1};
        float min_error = std::numeric_limits<float>::infinity();

        for (const auto & grid : candidate_grids) {
            float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
            if (error < min_error) {
                best_grid = grid;
                min_error = error;
            }
        }
        return best_grid;
    }
};
// returns the normalized float tensor for llava-1.5; for spatial_unpad with anyres processing for llava-1.6, it returns the normalized image patch tensors as a vector
// res_imgs memory is allocated here; previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
    clip_image_size original_size{img->nx, img->ny};

    auto & params = ctx->model.hparams;

    switch (ctx->proj_type()) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                for (size_t i = 0; i < imgs.size(); ++i) {
                    // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = inst.grid_size.width;
                res_imgs->grid_y = inst.grid_size.height;
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_QWEN3VL:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                clip_image_u8 resized;
                const clip_image_size new_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * 2,
                    params.image_min_pixels,
                    params.image_max_pixels);
                img_tool::resize(*img, resized, new_size, img_tool::RESIZE_ALGO_BILINEAR, false);
                // clip_image_save_to_bmp(resized, "preproc.bmp");

                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_IDEFICS3:
            {
                // The refined size has two steps:
                // 1. Resize w/ aspect-ratio preserving such that the longer side is
                //    the preprocessor longest size
                // 2. Resize w/out preserving aspect ratio such that both sides are
                //    multiples of image_size (always rounding up)
                //
                // CITE: https://github.com/huggingface/transformers/blob/main/src/transformers/models/idefics3/image_processing_idefics3.py#L737
                const clip_image_size refined_size = img_tool::calc_size_preserved_ratio(
                    original_size, params.image_size, params.image_longest_edge);
                // LOG_INF("%s: original size: %d x %d, refined size: %d x %d\n",
                //     __func__, original_size.width, original_size.height,
                //     refined_size.width, refined_size.height);

                llava_uhd::slice_instructions instructions;
                instructions.overview_size = clip_image_size{params.image_size, params.image_size};
                instructions.refined_size  = refined_size;
                instructions.grid_size     = clip_image_size{
                    static_cast<int>(std::ceil(static_cast<float>(refined_size.width)  / params.image_size)),
                    static_cast<int>(std::ceil(static_cast<float>(refined_size.height) / params.image_size)),
                };

                for (int y = 0; y < refined_size.height; y += params.image_size) {
                    for (int x = 0; x < refined_size.width; x += params.image_size) {
                        // LOG_INF("%s: adding slice at x=%d, y=%d\n", __func__, x, y);
                        instructions.slices.push_back(llava_uhd::slice_coordinates{
                            /* x    */ x,
                            /* y    */ y,
                            /* size */ clip_image_size{
                                std::min(params.image_size, refined_size.width  - x),
                                std::min(params.image_size, refined_size.height - y)
                            }
                        });
                    }
                }

                auto imgs = llava_uhd::slice_image(img, instructions);

                // cast and normalize to f32
                for (size_t i = 0; i < imgs.size(); ++i) {
                    // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = instructions.grid_size.width;
                res_imgs->grid_y = instructions.grid_size.height;
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_INTERNVL: // TODO @ngxson : support dynamic resolution
            {
                clip_image_u8 resized_image;
                int sz = params.image_size;
                img_tool::resize(*img, resized_image, {sz, sz}, img_tool::RESIZE_ALGO_BILINEAR);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                // clip_image_save_to_bmp(resized_image, "resized.bmp");
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;

        case PROJECTOR_TYPE_JANUS_PRO:
            {
                // Janus Pro preprocessing: pad to square with gray(127), resize to 384x384
                const std::array<uint8_t, 3> pad_color = {127, 127, 127};

                clip_image_u8 resized_image;
                int sz = params.image_size;
                img_tool::resize(*img, resized_image, {sz, sz}, img_tool::RESIZE_ALGO_BILINEAR, true, pad_color);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                clip_image_u8 resized_image;
                // the original pixtral model doesn't have n_merge
                const int cur_merge = params.n_merge == 0 ? 1 : params.n_merge;
                const clip_image_size target_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * cur_merge,
                    params.image_min_pixels,
                    params.image_max_pixels);
                img_tool::resize(*img, resized_image, target_size, img_tool::RESIZE_ALGO_BILINEAR);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;

        case PROJECTOR_TYPE_LLAMA4:
            {
                GGML_ASSERT(!params.image_res_candidates.empty());
                auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                for (size_t i = 0; i < imgs.size(); ++i) {
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = inst.grid_size.width;
                res_imgs->grid_y = inst.grid_size.height;
            } break;
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                const clip_image_size target_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * params.n_merge,
                    params.image_min_pixels,
                    params.image_max_pixels);
                const std::array<uint8_t, 3> pad_color = {122, 116, 104};

                clip_image_u8 resized_img;
                const bool pad = (ctx->proj_type() != PROJECTOR_TYPE_LFM2);
                img_tool::resize(*img, resized_img, target_size, img_tool::RESIZE_ALGO_BILINEAR, pad, pad_color);
                clip_image_f32_ptr res(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_img, *res, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(res));
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_COGVLM: // TODO @ngxson : is this correct for cogvlm?
            {
                // TODO @ngxson : refactor the code below to avoid duplicated logic
                // The model config actually contains all we need to decide how to preprocess;
                // here we automatically switch to the new llava-1.6 preprocessing when
                // resolution candidates are present
                clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily

                if (params.image_res_candidates.empty()) { // pad_to_square
                    // for llava-1.5, we resize the image to a square, padding the shorter side with a background color
                    // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
                    const int longer_side = std::max(img->nx, img->ny);
                    temp->nx = longer_side;
                    temp->ny = longer_side;
                    temp->buf.resize(3 * longer_side * longer_side);

                    // background color in RGB from LLaVA (this is the mean rgb color * 255)
                    const std::array<uint8_t, 3> pad_color = {122, 116, 104};

                    // resize the image to the target size, padding with the background color
                    img_tool::resize(*img, *temp, clip_image_size{params.image_size, params.image_size}, img_tool::RESIZE_ALGO_BILINEAR, true, pad_color);
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*temp, *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                } else {
                    // "spatial_unpad" with "anyres" processing for llava-1.6
                    auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                    std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                    for (size_t i = 0; i < imgs.size(); ++i) {
                        // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                        clip_image_f32_ptr res(clip_image_f32_init());
                        normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                        res_imgs->entries.push_back(std::move(res));
                    }
                }
            } break;

        default:
            LOG_ERR("%s: unsupported projector type %d\n", __func__, ctx->proj_type());
            return false;
    }

    return true;
}
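
// Typical call sequence for the preprocessor above (a sketch; mtmd is the real
// consumer of this API, and error handling is elided):
//
//   clip_image_u8_ptr img_u8(clip_image_u8_init());
//   // ... decode a bitmap into img_u8 (RGB, 3 bytes per pixel) ...
//   clip_image_f32_batch batch;
//   if (clip_image_preprocess(ctx, img_u8.get(), &batch)) {
//       // batch.entries holds one normalized f32 image per slice
//       // (the overview comes first when slicing is used);
//       // batch.grid_x / batch.grid_y describe the slice layout
//   }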
ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
    return ctx->model.image_newline;
}

void clip_free(clip_ctx * ctx) {
    if (ctx == nullptr) {
        return;
    }
    delete ctx;
}
// deprecated
size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
    const int32_t nx = ctx->model.hparams.image_size;
    const int32_t ny = ctx->model.hparams.image_size;
    return clip_embd_nbytes_by_img(ctx, nx, ny);
}

size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h) {
    clip_image_f32 img;
    img.nx = img_w;
    img.ny = img_h;
    return clip_n_output_tokens(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
}
int32_t clip_get_image_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.image_size;
}

int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.patch_size;
}

int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.n_embd;
}

const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
    return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
}

int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;
    const int n_total = clip_n_output_tokens(ctx, img);
    if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN3VL) {
        return img->nx / (params.patch_size * 2);
    }
    return n_total;
}

int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;
    if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN3VL) {
        return img->ny / (params.patch_size * 2);
    }
    return 1;
}
int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;

    // for models with fixed size image, the input image is already pre-processed and resized to square
    int patch_size = params.patch_size;
    int n_patches  = (img->nx / patch_size) * (img->ny / patch_size);

    projector_type proj = ctx->proj_type();

    switch (proj) {
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_JANUS_PRO:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                n_patches /= 4;
                if (ctx->model.mm_boi) {
                    n_patches += 2; // for BOI and EOI token embeddings
                }
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                // Use actual config value if available, otherwise fall back to hardcoded values
                if (params.minicpmv_query_num > 0) {
                    n_patches = params.minicpmv_query_num;
                } else {
                    // Fallback to hardcoded values for legacy models
                    if (params.minicpmv_version == 2) {
                        n_patches = 96;
                    } else if (params.minicpmv_version == 3) {
                        n_patches = 64;
                    } else if (params.minicpmv_version == 4) {
                        n_patches = 64;
                    } else if (params.minicpmv_version == 5) {
                        // MiniCPM-V 4.0
                        n_patches = 64;
                    } else if (params.minicpmv_version == 6) {
                        // MiniCPM-V 4.5
                        n_patches = 64;
                    } else {
                        GGML_ABORT("Unknown minicpmv version");
                    }
                }
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_QWEN3VL:
            {
                // dynamic size (2 conv, so double patch size)
                int x_patch = img->nx / (params.patch_size * 2);
                int y_patch = img->ny / (params.patch_size * 2);
                n_patches = x_patch * y_patch;
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_LLAMA4:
            {
                // both X and Y are downscaled by the scale factor
                int scale_factor = ctx->model.hparams.n_merge;
                n_patches /= (scale_factor * scale_factor);
            } break;
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            {
                // dynamic size
                int out_patch_size = params.patch_size * ctx->model.hparams.n_merge;
                int x_patch = CLIP_ALIGN(img->nx, out_patch_size) / out_patch_size;
                int y_patch = CLIP_ALIGN(img->ny, out_patch_size) / out_patch_size;
                n_patches = x_patch * y_patch;
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                // dynamic size
                int n_merge = ctx->model.hparams.n_merge;
                int n_patches_x = img->nx / patch_size / (n_merge > 0 ? n_merge : 1);
                int n_patches_y = img->ny / patch_size / (n_merge > 0 ? n_merge : 1);
                if (ctx->model.token_embd_img_break) {
                    n_patches = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
                } else {
                    n_patches = n_patches_y * n_patches_x;
                }
            } break;
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_QWEN2A:
            {
                n_patches = img->nx;

                const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
                if (ctx->model.audio_has_stack_frames()) {
                    GGML_ASSERT(proj_stack_factor > 0);
                    const int n_len = CLIP_ALIGN(n_patches, proj_stack_factor);
                    n_patches = n_len / proj_stack_factor;
                }

                // whisper downscales input token by half after conv1d
                n_patches /= 2;

                if (ctx->model.audio_has_avgpool()) {
                    // divide by 2 because of nn.AvgPool1d(2, stride=2)
                    n_patches /= 2;
                }
            } break;
        case PROJECTOR_TYPE_COGVLM:
            {
                n_patches += 2; // for BOI and EOI token embeddings
            } break;
        default:
            GGML_ABORT("unsupported projector type");
    }

    return n_patches;
}
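
// Example token count (a sketch with illustrative numbers): for a Qwen2-VL-style
// model with patch_size = 14, a preprocessed 1008x588 image yields
// x_patch = 1008 / 28 = 36 and y_patch = 588 / 28 = 21, i.e. 756 output tokens;
// clip_n_output_tokens_x/y above return the same 36 and 21 per dimension.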
bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    clip_image_f32_batch imgs;
    clip_image_f32_ptr img_copy(clip_image_f32_init());
    *img_copy = *img;
    imgs.entries.push_back(std::move(img_copy));

    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}
bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
    const clip_image_f32_batch & imgs = *imgs_c_ptr;
    int batch_size = imgs.entries.size();

    // TODO @ngxson : implement batch size > 1 as a loop
    // we don't need true batching support because the cgraph is going to be big anyway
    if (batch_size != 1) {
        return false; // only support batch size of 1
    }

    // if buffers are not allocated, we need to do a warmup run to allocate them
    if (!ctx->is_allocated) {
        clip_model_loader::warmup(*ctx, *imgs_c_ptr);
    }

    // build the inference graph
    ctx->debug_print_tensors.clear();
    ggml_backend_sched_reset(ctx->sched.get());
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
    ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);

    // set inputs
    const auto & model   = ctx->model;
    const auto & hparams = model.hparams;

    const int image_size_width  = imgs.entries[0]->nx;
    const int image_size_height = imgs.entries[0]->ny;

    const int patch_size  = hparams.patch_size;
    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int n_pos = num_patches + (model.class_embedding ? 1 : 0);
    const int pos_w = image_size_width  / patch_size;
    const int pos_h = image_size_height / patch_size;

    const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl

    auto get_inp_tensor = [&gf](const char * name) {
        ggml_tensor * inp = ggml_graph_get_tensor(gf, name);
        if (inp == nullptr) {
            GGML_ABORT("Failed to get tensor %s", name);
        }
        if (!(inp->flags & GGML_TENSOR_FLAG_INPUT)) {
            GGML_ABORT("Tensor %s is not an input tensor", name);
        }
        return inp;
    };

    auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_F32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_I32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };
    // set input pixel values
    if (!imgs.is_audio) {
        size_t nelem = 0;
        for (const auto & img : imgs.entries) {
            nelem += img->nx * img->ny * 3;
        }
        std::vector<float> inp_raw(nelem);

        // layout of data (note: the channel dim is unrolled to better visualize the layout):
        //
        // ┌──W──┐
        // │  H  │  channel = R
        // ├─────┤    │
        // │  H  │  channel = G
        // ├─────┤    │
        // │  H  │  channel = B
        // └─────┘    │
        //   ─────────┘ x B

        for (size_t i = 0; i < imgs.entries.size(); i++) {
            const int nx = imgs.entries[i]->nx;
            const int ny = imgs.entries[i]->ny;
            const int n  = nx * ny;

            for (int b = 0; b < batch_size; b++) {
                float * batch_entry = inp_raw.data() + b * (3 * n);
                for (int y = 0; y < ny; y++) {
                    for (int x = 0; x < nx; x++) {
                        size_t base_src = 3 * (y * nx + x); // idx of the first channel
                        size_t base_dst =      y * nx + x;  // idx of the first channel
                        batch_entry[        base_dst] = imgs.entries[b]->buf[base_src    ];
                        batch_entry[1 * n + base_dst] = imgs.entries[b]->buf[base_src + 1];
                        batch_entry[2 * n + base_dst] = imgs.entries[b]->buf[base_src + 2];
                    }
                }
            }
        }
        set_input_f32("inp_raw", inp_raw);
    } else {
        // audio input
        GGML_ASSERT(imgs.entries.size() == 1);
        const auto & mel_inp = imgs.entries[0];
        const int n_step = mel_inp->nx;
        const int n_mel  = mel_inp->ny;
        std::vector<float> inp_raw(n_step * n_mel);
        std::memcpy(inp_raw.data(), mel_inp->buf.data(), n_step * n_mel * sizeof(float));
        set_input_f32("inp_raw", inp_raw);
    }
    // set input per projector
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                // inspired by siglip:
                // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
                // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
                std::vector<int32_t> positions(pos_h * pos_w);
                int bucket_coords_h[1024];
                int bucket_coords_w[1024];
                for (int i = 0; i < pos_h; i++) {
                    bucket_coords_h[i] = std::floor(70.0 * i / pos_h);
                }
                for (int i = 0; i < pos_w; i++) {
                    bucket_coords_w[i] = std::floor(70.0 * i / pos_w);
                }
                for (int i = 0, id = 0; i < pos_h; i++) {
                    for (int j = 0; j < pos_w; j++) {
                        positions[id++] = bucket_coords_h[i] * 70 + bucket_coords_w[j];
                    }
                }
                set_input_i32("positions", positions);

                // inputs for the resampler projector
                // set the 2D positions (using float for sinusoidal embedding)
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<float> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = static_cast<float>(i / n_patches_per_col);
                }
                set_input_f32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = static_cast<float>(i % n_patches_per_col);
                }
                set_input_f32("pos_w", pos_data);

                // base frequency omega
                const float base_freq = 10000.0f;
                const int n_embd_proj = clip_n_mmproj_embd(ctx);
                std::vector<float> omega(n_embd_proj / 4);
                for (int i = 0; i < n_embd_proj / 4; ++i) {
                    omega[i] = 1.0f / std::pow(base_freq, static_cast<float>(i) / (n_embd_proj / 4));
                }
                set_input_f32("omega", omega);
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN3VL:
            {
                const int merge_ratio = hparams.n_merge;
                const int pw = image_size_width  / patch_size;
                const int ph = image_size_height / patch_size;
                std::vector<int> positions(n_pos * 4);
                int ptr = 0;
                for (int y = 0; y < ph; y += merge_ratio) {
                    for (int x = 0; x < pw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                positions[                  ptr] = y + dy;
                                positions[    num_patches + ptr] = x + dx;
                                positions[2 * num_patches + ptr] = y + dy;
                                positions[3 * num_patches + ptr] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }
                set_input_i32("positions", positions);
            } break;
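
        // Layout note (added for clarity; the grid size is illustrative): the
        // "positions" buffer above holds four sections of num_patches entries
        // each, in (y, x, y, x) order, consumed by the rope-multi position
        // encoding. Patches are emitted in merge-window order, so for a 4x4
        // patch grid with merge_ratio = 2 the first four entries cover the
        // top-left 2x2 window: (y,x) = (0,0), (0,1), (1,0), (1,1), then the
        // window starting at x = 2, and so on row by row.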
        case PROJECTOR_TYPE_QWEN25VL:
            {
                // pw * ph = number of tokens output by the ViT after applying the patch merger
                // ipw * iph = number of vision tokens processed inside the ViT
                const int merge_ratio = 2;
                const int pw  = image_size_width  / patch_size / merge_ratio;
                const int ph  = image_size_height / patch_size / merge_ratio;
                const int ipw = image_size_width  / patch_size;
                const int iph = image_size_height / patch_size;

                std::vector<int> idx    (ph * pw);
                std::vector<int> inv_idx(ph * pw);

                if (use_window_attn) {
                    const int attn_window_size = 112;
                    const int grid_window = attn_window_size / patch_size / merge_ratio;
                    int dst = 0;
                    // [num_vision_tokens, num_vision_tokens] attention mask tensor
                    std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
                    int mask_row = 0;

                    for (int y = 0; y < ph; y += grid_window) {
                        for (int x = 0; x < pw; x += grid_window) {
                            const int win_h = std::min(grid_window, ph - y);
                            const int win_w = std::min(grid_window, pw - x);
                            const int dst_0 = dst;
                            // group all tokens belonging to the same window together (into a contiguous range)
                            for (int dy = 0; dy < win_h; dy++) {
                                for (int dx = 0; dx < win_w; dx++) {
                                    const int src = (y + dy) * pw + (x + dx);
                                    GGML_ASSERT(src < (int)idx.size());
                                    GGML_ASSERT(dst < (int)inv_idx.size());
                                    idx    [src] = dst;
                                    inv_idx[dst] = src;
                                    dst++;
                                }
                            }

                            for (int r = 0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
                                int row_offset = mask_row * (ipw * iph);
                                std::fill(
                                    mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
                                    mask.begin() + row_offset + (dst   * merge_ratio * merge_ratio),
                                    0.0);
                                mask_row++;
                            }
                        }
                    }

                    set_input_i32("window_idx", idx);
                    set_input_i32("inv_window_idx", inv_idx);
                    set_input_f32("window_mask", mask);
                } else {
                    for (int i = 0; i < ph * pw; i++) {
                        idx[i] = i;
                    }
                }

                const int mpow = merge_ratio * merge_ratio;
                std::vector<int> positions(n_pos * 4);

                int ptr = 0;
                for (int y = 0; y < iph; y += merge_ratio) {
                    for (int x = 0; x < ipw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                auto remap = idx[ptr / mpow];
                                remap = (remap * mpow) + (ptr % mpow);

                                positions[                  remap] = y + dy;
                                positions[    num_patches + remap] = x + dx;
                                positions[2 * num_patches + remap] = y + dy;
                                positions[3 * num_patches + remap] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }

                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_KIMIVL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i / n_patches_per_col;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i % n_patches_per_col;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);

                // The patches vector is used to get rows to index into the embeds with;
                // we should skip dim 0 only if we have CLS to avoid going out of bounds
                // when retrieving the rows.
                int patch_offset = model.class_embedding ? 1 : 0;
                std::vector<int32_t> patches(num_patches);
                for (int i = 0; i < num_patches; i++) {
                    patches[i] = i + patch_offset;
                }
                set_input_i32("patches", patches);
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_QWEN2A:
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_JANUS_PRO:
        case PROJECTOR_TYPE_COGVLM:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(num_patches + 1, 0); // +1 for the [CLS] token
                // the last pos is always kept at 0, it's for CLS
                // dimension H
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i / n_patches_per_col) + 1;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i % n_patches_per_col) + 1;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        default:
            GGML_ABORT("Unknown projector type");
    }
    // ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
    ggml_backend_dev_t dev = ggml_backend_get_device(ctx->backend_cpu);
    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
    if (reg) {
        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (ggml_backend_set_n_threads_fn) {
            ggml_backend_set_n_threads_fn(ctx->backend_cpu, n_threads);
        }
    }

    auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
        return false;
    }

    // print debug nodes
    if (ctx->debug_graph) {
        LOG_INF("\n\n---\n\n");
        LOG_INF("\n\nDebug graph:\n\n");
        for (ggml_tensor * t : ctx->debug_print_tensors) {
            std::vector<uint8_t> data(ggml_nbytes(t));
            ggml_backend_tensor_get(t, data.data(), 0, ggml_nbytes(t));
            print_tensor_shape(t);
            print_tensor_data(t, data.data(), 3);
        }
    }

    // the last node is the embedding tensor
    ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // sanity check (only support batch size of 1 for now)
    const int n_tokens_out          = embeddings->ne[1];
    const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
    if (n_tokens_out != expected_n_tokens_out) {
        LOG_ERR("%s: expected output %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
        GGML_ABORT("Invalid number of output tokens");
    }

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}
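
// Caller-side sketch (illustrative; mtmd wraps this in practice): the output
// buffer must be sized from the *preprocessed* image dimensions, e.g.
//
//   std::vector<float> embd(clip_embd_nbytes_by_img(ctx, img_f32->nx, img_f32->ny) / sizeof(float));
//   if (!clip_image_encode(ctx, /* n_threads */ 4, img_f32.get(), embd.data())) {
//       // handle failure (e.g. unsupported batch size or compute error)
//   }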
int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_LDP:
            return ctx->model.mm_model_block_1_block_2_1_b->ne[0];
        case PROJECTOR_TYPE_LDPV2:
            return ctx->model.mm_model_peg_0_b->ne[0];
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_MLP_NORM:
            return ctx->model.mm_3_b->ne[0];
        case PROJECTOR_TYPE_MINICPMV:
            return ctx->model.mm_model_proj->ne[0];
        case PROJECTOR_TYPE_GLM_EDGE:
            return ctx->model.mm_model_mlp_3_w->ne[1];
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_JANUS_PRO:
            return ctx->model.mm_1_b->ne[0];
        case PROJECTOR_TYPE_QWEN3VL:
            // main path + deepstack paths
            return ctx->model.mm_1_b->ne[0] * (1 + ctx->model.n_deepstack_layers);
        case PROJECTOR_TYPE_GEMMA3:
            return ctx->model.mm_input_proj_w->ne[0];
        case PROJECTOR_TYPE_IDEFICS3:
            return ctx->model.projection->ne[1];
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_VOXTRAL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_INTERNVL:
            return ctx->model.mm_3_w->ne[1];
        case PROJECTOR_TYPE_LLAMA4:
            return ctx->model.mm_model_proj->ne[1];
        case PROJECTOR_TYPE_QWEN2A:
            return ctx->model.mm_fc_w->ne[1];
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_COGVLM:
            return ctx->model.mm_4h_to_h_w->ne[1];
        default:
            GGML_ABORT("Unknown projector type");
    }
}
int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) {
        return ctx->model.hparams.minicpmv_version;
    }
    return 0;
}

bool clip_is_glm(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE;
}

bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN3VL;
}

bool clip_is_llava(const struct clip_ctx * ctx) {
    return ctx->model.hparams.has_llava_projector;
}

bool clip_is_gemma3(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GEMMA3;
}

bool clip_has_vision_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_VISION;
}

bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_AUDIO;
}

bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A
        || ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL;
}
bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
    clip_image_f32 clip_img;
    clip_img.buf.resize(h * w * 3);

    for (int i = 0; i < h * w * 3; i++) {
        clip_img.buf[i] = img[i];
    }

    clip_img.nx = w;
    clip_img.ny = h;

    // propagate the encoder result instead of unconditionally reporting success
    return clip_image_encode(ctx, n_threads, &clip_img, vec);
}
//
// API used internally with mtmd
//

projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
    return ctx->proj_type();
}

void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
    clip_image_f32 * audio = new clip_image_f32;
    audio->nx = n_frames;
    audio->ny = n_mel;
    audio->buf.resize(n_frames * n_mel);
    std::memcpy(audio->buf.data(), mel, n_frames * n_mel * sizeof(float));

    batch->entries.push_back(clip_image_f32_ptr(audio));
    batch->is_audio = true;
}