clip.cpp

  1. // NOTE: This is modified from clip.cpp only for LLaVA,
  2. // so there might still be unnecessary artifacts hanging around
  3. // I'll gradually clean and extend it
  4. // Note: even when using identical normalized image inputs (see normalize_image_u8_to_f32()) the resulting embeddings still differ significantly from the PyTorch implementation
  5. #include "clip.h"
  6. #include "clip-impl.h"
  7. #include "ggml.h"
  8. #include "ggml-cpp.h"
  9. #include "ggml-cpu.h"
  10. #include "ggml-alloc.h"
  11. #include "ggml-backend.h"
  12. #include "gguf.h"
  13. #define STB_IMAGE_IMPLEMENTATION
  14. #include "stb_image.h"
  15. #include <cassert>
  16. #include <cmath>
  17. #include <cstdlib>
  18. #include <cstring>
  19. #include <fstream>
  20. #include <map>
  21. #include <regex>
  22. #include <stdexcept>
  23. #include <unordered_set>
  24. #include <vector>
  25. #include <sstream>
  26. #include <cinttypes>
  27. #include <limits>
  28. #include <array>
  29. #include <numeric>
  30. struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};
  31. //#define CLIP_DEBUG_FUNCTIONS
  32. #ifdef CLIP_DEBUG_FUNCTIONS
  33. static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
  34. std::ofstream file(filename, std::ios::binary);
  35. if (!file.is_open()) {
  36. LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
  37. return;
  38. }
  39. // PPM header: P6 format, width, height, and max color value
  40. file << "P6\n" << img.nx << " " << img.ny << "\n255\n";
  41. // Write pixel data
  42. for (size_t i = 0; i < img.buf.size(); i += 3) {
  43. // PPM expects binary data in RGB format, which matches our image buffer
  44. file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
  45. }
  46. file.close();
  47. }
  48. static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
  49. std::ofstream file(filename, std::ios::binary);
  50. if (!file.is_open()) {
  51. LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
  52. return;
  53. }
  54. int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
  55. int bytesPerPixel = 3;
  56. int widthInBytes = img.nx * bytesPerPixel;
  57. int paddingAmount = (4 - (widthInBytes % 4)) % 4;
  58. int stride = widthInBytes + paddingAmount;
  59. // Bitmap file header
  60. unsigned char fileHeader[14] = {
  61. 'B','M', // Signature
  62. 0,0,0,0, // Image file size in bytes
  63. 0,0,0,0, // Reserved
  64. 54,0,0,0 // Start of pixel array
  65. };
  66. // Total file size
  67. fileSize = 54 + (stride * img.ny);
  68. fileHeader[2] = (unsigned char)(fileSize);
  69. fileHeader[3] = (unsigned char)(fileSize >> 8);
  70. fileHeader[4] = (unsigned char)(fileSize >> 16);
  71. fileHeader[5] = (unsigned char)(fileSize >> 24);
  72. // Bitmap information header (BITMAPINFOHEADER)
  73. unsigned char infoHeader[40] = {
  74. 40,0,0,0, // Size of this header (40 bytes)
  75. 0,0,0,0, // Image width
  76. 0,0,0,0, // Image height
  77. 1,0, // Number of color planes
  78. 24,0, // Bits per pixel
  79. 0,0,0,0, // No compression
  80. 0,0,0,0, // Image size (can be 0 for no compression)
  81. 0,0,0,0, // X pixels per meter (not specified)
  82. 0,0,0,0, // Y pixels per meter (not specified)
  83. 0,0,0,0, // Total colors (color table not used)
  84. 0,0,0,0 // Important colors (all are important)
  85. };
  86. // Width and height in the information header
  87. infoHeader[4] = (unsigned char)(img.nx);
  88. infoHeader[5] = (unsigned char)(img.nx >> 8);
  89. infoHeader[6] = (unsigned char)(img.nx >> 16);
  90. infoHeader[7] = (unsigned char)(img.nx >> 24);
  91. infoHeader[8] = (unsigned char)(img.ny);
  92. infoHeader[9] = (unsigned char)(img.ny >> 8);
  93. infoHeader[10] = (unsigned char)(img.ny >> 16);
  94. infoHeader[11] = (unsigned char)(img.ny >> 24);
  95. // Write file headers
  96. file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
  97. file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));
  98. // Pixel data
  99. std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
  100. for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
  101. for (int x = 0; x < img.nx; ++x) {
  102. // Each pixel
  103. size_t pixelIndex = (y * img.nx + x) * 3;
  104. unsigned char pixel[3] = {
  105. img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
  106. img.buf[pixelIndex + 1],
  107. img.buf[pixelIndex]
  108. };
  109. file.write(reinterpret_cast<char*>(pixel), 3);
  110. }
  111. // Write padding for the row
  112. file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
  113. }
  114. file.close();
  115. }
  116. // debug function to convert f32 to u8
  117. static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
  118. dst.nx = src.nx;
  119. dst.ny = src.ny;
  120. dst.buf.resize(3 * src.nx * src.ny);
  121. for (size_t i = 0; i < src.buf.size(); ++i) {
  122. dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
  123. }
  124. }
  125. #endif
  126. //
  127. // clip layers
  128. //
  129. enum patch_merge_type {
  130. PATCH_MERGE_FLAT,
  131. PATCH_MERGE_SPATIAL_UNPAD,
  132. };
  133. struct clip_hparams {
  134. int32_t image_size;
  135. int32_t patch_size;
  136. int32_t hidden_size;
  137. int32_t n_intermediate;
  138. int32_t projection_dim;
  139. int32_t n_head;
  140. int32_t n_layer;
  141. int32_t proj_scale_factor = 0; // idefics3
  142. patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;
  143. float eps = 1e-6;
  144. float rope_theta = 0.0;
  145. std::vector<int32_t> image_grid_pinpoints;
  146. int32_t image_crop_resolution;
  147. std::unordered_set<int32_t> vision_feature_layer;
  148. int32_t attn_window_size = 0;
  149. int32_t n_wa_pattern = 0;
  150. };
  151. struct clip_layer {
  152. // attention
  153. struct ggml_tensor * k_w = nullptr;
  154. struct ggml_tensor * k_b = nullptr;
  155. struct ggml_tensor * q_w = nullptr;
  156. struct ggml_tensor * q_b = nullptr;
  157. struct ggml_tensor * v_w = nullptr;
  158. struct ggml_tensor * v_b = nullptr;
  159. struct ggml_tensor * o_w = nullptr;
  160. struct ggml_tensor * o_b = nullptr;
  161. // layernorm 1
  162. struct ggml_tensor * ln_1_w = nullptr;
  163. struct ggml_tensor * ln_1_b = nullptr;
  164. // ff
  165. struct ggml_tensor * ff_i_w = nullptr; // legacy naming
  166. struct ggml_tensor * ff_i_b = nullptr; // legacy naming
  167. struct ggml_tensor * ff_o_w = nullptr; // legacy naming
  168. struct ggml_tensor * ff_o_b = nullptr; // legacy naming
  169. struct ggml_tensor * ff_up_w = nullptr;
  170. struct ggml_tensor * ff_up_b = nullptr;
  171. struct ggml_tensor * ff_gate_w = nullptr;
  172. struct ggml_tensor * ff_gate_b = nullptr;
  173. struct ggml_tensor * ff_down_w = nullptr;
  174. struct ggml_tensor * ff_down_b = nullptr;
  175. struct ggml_tensor * ff_g_w = nullptr;
  176. struct ggml_tensor * ff_g_b = nullptr;
  177. // layernorm 2
  178. struct ggml_tensor * ln_2_w = nullptr;
  179. struct ggml_tensor * ln_2_b = nullptr;
  180. };
  181. struct clip_vision_model {
  182. struct clip_hparams hparams;
  183. // embeddings
  184. struct ggml_tensor * class_embedding = nullptr;
  185. struct ggml_tensor * patch_embeddings_0 = nullptr;
  186. struct ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along the temporal dimension (Qwen2VL)
  187. struct ggml_tensor * patch_bias = nullptr;
  188. struct ggml_tensor * position_embeddings = nullptr;
  189. struct ggml_tensor * pre_ln_w = nullptr;
  190. struct ggml_tensor * pre_ln_b = nullptr;
  191. std::vector<clip_layer> layers;
  192. struct ggml_tensor * post_ln_w;
  193. struct ggml_tensor * post_ln_b;
  194. struct ggml_tensor * projection;
  195. // LLaVA projection
  196. struct ggml_tensor * mm_0_w = nullptr;
  197. struct ggml_tensor * mm_0_b = nullptr;
  198. struct ggml_tensor * mm_2_w = nullptr;
  199. struct ggml_tensor * mm_2_b = nullptr;
  200. struct ggml_tensor * image_newline = nullptr;
  201. // Yi type models with mlp+normalization projection
  202. struct ggml_tensor * mm_1_w = nullptr; // Yi type models have 0, 1, 3, 4
  203. struct ggml_tensor * mm_1_b = nullptr;
  204. struct ggml_tensor * mm_3_w = nullptr;
  205. struct ggml_tensor * mm_3_b = nullptr;
  206. struct ggml_tensor * mm_4_w = nullptr;
  207. struct ggml_tensor * mm_4_b = nullptr;
  208. //GLMV-Edge projection
  209. struct ggml_tensor * mm_model_adapter_conv_w = nullptr;
  210. struct ggml_tensor * mm_model_adapter_conv_b = nullptr;
  211. // MobileVLM projection
  212. struct ggml_tensor * mm_model_mlp_1_w = nullptr;
  213. struct ggml_tensor * mm_model_mlp_1_b = nullptr;
  214. struct ggml_tensor * mm_model_mlp_3_w = nullptr;
  215. struct ggml_tensor * mm_model_mlp_3_b = nullptr;
  216. struct ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
  217. struct ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
  218. struct ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
  219. struct ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
  220. struct ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
  221. struct ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
  222. struct ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
  223. struct ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
  224. struct ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
  225. struct ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
  226. struct ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
  227. struct ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
  228. struct ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
  229. struct ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
  230. struct ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
  231. struct ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
  232. struct ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
  233. struct ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
  234. struct ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
  235. struct ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;
  236. // MobileVLM_V2 projection
  237. struct ggml_tensor * mm_model_mlp_0_w = nullptr;
  238. struct ggml_tensor * mm_model_mlp_0_b = nullptr;
  239. struct ggml_tensor * mm_model_mlp_2_w = nullptr;
  240. struct ggml_tensor * mm_model_mlp_2_b = nullptr;
  241. struct ggml_tensor * mm_model_peg_0_w = nullptr;
  242. struct ggml_tensor * mm_model_peg_0_b = nullptr;
  243. // MINICPMV projection
  244. struct ggml_tensor * mm_model_pos_embed_k = nullptr;
  245. struct ggml_tensor * mm_model_query = nullptr;
  246. struct ggml_tensor * mm_model_proj = nullptr;
  247. struct ggml_tensor * mm_model_kv_proj = nullptr;
  248. struct ggml_tensor * mm_model_attn_q_w = nullptr;
  249. struct ggml_tensor * mm_model_attn_q_b = nullptr;
  250. struct ggml_tensor * mm_model_attn_k_w = nullptr;
  251. struct ggml_tensor * mm_model_attn_k_b = nullptr;
  252. struct ggml_tensor * mm_model_attn_v_w = nullptr;
  253. struct ggml_tensor * mm_model_attn_v_b = nullptr;
  254. struct ggml_tensor * mm_model_attn_o_w = nullptr;
  255. struct ggml_tensor * mm_model_attn_o_b = nullptr;
  256. struct ggml_tensor * mm_model_ln_q_w = nullptr;
  257. struct ggml_tensor * mm_model_ln_q_b = nullptr;
  258. struct ggml_tensor * mm_model_ln_kv_w = nullptr;
  259. struct ggml_tensor * mm_model_ln_kv_b = nullptr;
  260. struct ggml_tensor * mm_model_ln_post_w = nullptr;
  261. struct ggml_tensor * mm_model_ln_post_b = nullptr;
  262. // gemma3
  263. struct ggml_tensor * mm_input_proj_w = nullptr;
  264. struct ggml_tensor * mm_soft_emb_norm_w = nullptr;
  265. // pixtral
  266. struct ggml_tensor * token_embd_img_break = nullptr;
  267. };
  268. struct clip_ctx {
  269. bool has_llava_projector = false;
  270. int minicpmv_version = 0;
  271. struct clip_vision_model vision_model;
  272. projector_type proj_type = PROJECTOR_TYPE_MLP;
  273. int32_t max_feature_layer; // unused in newer models like gemma3
  274. float image_mean[3];
  275. float image_std[3];
  276. bool use_gelu = false;
  277. bool use_silu = false;
  278. gguf_context_ptr ctx_gguf;
  279. ggml_context_ptr ctx_data;
  280. std::vector<uint8_t> buf_compute_meta;
  281. std::vector<ggml_backend_t> backend_ptrs;
  282. std::vector<ggml_backend_buffer_type_t> backend_buft;
  283. ggml_backend_t backend;
  284. ggml_backend_t backend_cpu;
  285. ggml_backend_buffer_ptr buf;
  286. int max_nodes = 8192;
  287. ggml_backend_sched_ptr sched;
  288. clip_image_size load_image_size;
  289. clip_ctx(clip_context_params & ctx_params) {
  290. backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
  291. backend = ctx_params.use_gpu
  292. ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr)
  293. : nullptr;
  294. if (backend) {
  295. LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
  296. backend_ptrs.push_back(backend);
  297. backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
  298. } else {
  299. backend = backend_cpu;
  300. LOG_INF("%s: CLIP using CPU backend\n", __func__);
  301. }
  302. backend_ptrs.push_back(backend_cpu);
  303. backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));
  304. sched.reset(
  305. ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false)
  306. );
  307. }
  308. ~clip_ctx() {
  309. ggml_backend_free(backend);
  310. if (backend != backend_cpu) {
  311. ggml_backend_free(backend_cpu);
  312. }
  313. }
  314. };
  315. static ggml_cgraph * clip_image_build_graph_siglip(clip_ctx * ctx, const clip_image_f32 & img) {
  316. const auto & model = ctx->vision_model;
  317. const auto & hparams = model.hparams;
  318. int image_size_width = img.nx;
  319. int image_size_height = img.ny;
  320. const int patch_size = hparams.patch_size;
  321. const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
  322. const int hidden_size = hparams.hidden_size;
  323. const int n_head = hparams.n_head;
  324. const int d_head = hidden_size / n_head;
  325. const int n_layer = hparams.n_layer;
  326. const float eps = hparams.eps;
  327. struct ggml_init_params params = {
  328. /*.mem_size =*/ ctx->buf_compute_meta.size(),
  329. /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
  330. /*.no_alloc =*/ true,
  331. };
  332. ggml_context_ptr ctx0_ptr(ggml_init(params));
  333. auto ctx0 = ctx0_ptr.get();
  334. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  335. // input raw
  336. struct ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3);
  337. ggml_set_name(inp_raw, "inp_raw");
  338. ggml_set_input(inp_raw);
  339. struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  340. inp = ggml_reshape_2d(ctx0, inp, num_patches, hidden_size);
  341. inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
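  // shape note: ggml_conv_2d yields [patches_w, patches_h, hidden_size]; the reshape and
  // transpose above flatten this to [hidden_size, num_patches], i.e. one hidden_size-dim row per patch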
  342. inp = ggml_add(ctx0, inp, model.patch_bias);
  343. // position embeddings
  344. struct ggml_tensor * embeddings = ggml_add(ctx0, inp, model.position_embeddings);
  345. // loop over layers
  346. for (int il = 0; il < n_layer; il++) {
  347. struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states
  348. // layernorm1
  349. {
  350. cur = ggml_norm(ctx0, cur, eps);
  351. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w), model.layers[il].ln_1_b);
  352. }
  353. // self-attention
  354. {
  355. struct ggml_tensor * Q =
  356. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b);
  357. Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_patches);
  358. Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
  359. struct ggml_tensor * K =
  360. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b);
  361. K = ggml_reshape_3d(ctx0, K, d_head, n_head, num_patches);
  362. K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
  363. struct ggml_tensor * V =
  364. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b);
  365. V = ggml_reshape_3d(ctx0, V, d_head, n_head, num_patches);
  366. V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
  367. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  368. KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
  369. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
  370. KQV = ggml_reshape_3d(ctx0, KQV, d_head, num_patches, n_head);
  371. KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  372. cur = ggml_cont_2d(ctx0, KQV, hidden_size, num_patches);
  373. }
  374. // attention output
  375. cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);
  376. // re-add the layer input, i.e. the residual connection
  377. cur = ggml_add(ctx0, cur, embeddings);
  378. embeddings = cur; // embeddings = residual, cur = hidden_states
  379. // layernorm2
  380. {
  381. cur = ggml_norm(ctx0, cur, eps);
  382. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);
  383. }
  384. cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
  385. cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
  386. // siglip uses gelu
  387. cur = ggml_gelu(ctx0, cur);
  388. cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
  389. cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);
  390. // residual 2
  391. cur = ggml_add(ctx0, embeddings, cur);
  392. embeddings = cur;
  393. }
  394. // post-layernorm
  395. if (model.post_ln_w) {
  396. embeddings = ggml_norm(ctx0, embeddings, eps);
  397. ggml_set_name(embeddings, "post_ln");
  398. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
  399. }
  400. if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
  401. const int batch_size = 1;
  402. const int mm_tokens_per_image = 256; // default value for gemma3
  403. const int tokens_per_side = sqrt(mm_tokens_per_image);
  404. const int patches_per_image = sqrt(num_patches);
  405. const int kernel_size = patches_per_image / tokens_per_side;
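  // e.g. (illustrative numbers): a 64x64 patch grid gives kernel_size = 64/16 = 4, so the
  // average pooling below reduces it to a 16x16 = 256-token grid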
  406. embeddings = ggml_cont(ctx0, ggml_transpose(ctx0, embeddings));
  407. embeddings = ggml_reshape_4d(ctx0, embeddings, patches_per_image, patches_per_image, hidden_size, batch_size);
  408. // doing a pool2d to reduce the number of output tokens to 256
  409. embeddings = ggml_pool_2d(ctx0, embeddings, GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0);
  410. embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0] * embeddings->ne[0], hidden_size, batch_size);
  411. embeddings = ggml_cont(ctx0, ggml_transpose(ctx0, embeddings));
  412. // apply norm before projection
  413. embeddings = ggml_rms_norm(ctx0, embeddings, eps);
  414. embeddings = ggml_mul(ctx0, embeddings, model.mm_soft_emb_norm_w);
  415. // apply projection
  416. embeddings = ggml_mul_mat(ctx0,
  417. ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)),
  418. embeddings);
  419. } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3) {
  420. // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578
  421. ggml_tensor * cur = embeddings;
  422. const int scale_factor = model.hparams.proj_scale_factor;
  423. const int n_embd = cur->ne[0];
  424. const int seq = cur->ne[1];
  425. const int bsz = 1; // batch size, always 1 for now since we don't support batching
  426. const int height = std::sqrt(seq);
  427. const int width = std::sqrt(seq);
  428. GGML_ASSERT(scale_factor != 0);
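  // e.g. with scale_factor = 2 the pixel-shuffle below turns [n_embd, seq] into [n_embd*4, seq/4]:
  // each output row concatenates a 2x2 block of neighbouring patch embeddings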
  429. cur = ggml_reshape_4d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height, bsz);
  430. cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
  431. cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
  432. n_embd * scale_factor * scale_factor,
  433. height / scale_factor,
  434. width / scale_factor,
  435. bsz);
  436. cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
  437. cur = ggml_reshape_3d(ctx0, ggml_cont(ctx0, cur),
  438. n_embd * scale_factor * scale_factor,
  439. seq / (scale_factor * scale_factor),
  440. bsz);
  441. cur = ggml_mul_mat(ctx0, model.projection, cur);
  442. embeddings = cur;
  443. } else {
  444. GGML_ABORT("SigLIP: Unsupported projector type");
  445. }
  446. // build the graph
  447. ggml_build_forward_expand(gf, embeddings);
  448. return gf;
  449. }
  450. // implementation of the 2D RoPE without adding a new op in ggml
  451. // this is not efficient (it uses double the memory), but it works on all backends
  452. // TODO: there was a more efficient implementation which relies on ggml_view and ggml_rope_ext_inplace, but the in-place rope does not work well with non-contiguous tensors; we should fix that and revert to the original implementation in https://github.com/ggml-org/llama.cpp/pull/13065
  453. static ggml_tensor * build_rope_2d(
  454. ggml_context * ctx0,
  455. ggml_tensor * cur,
  456. ggml_tensor * pos_h,
  457. ggml_tensor * pos_w,
  458. const float freq_base
  459. ) {
  460. const int64_t n_dim = cur->ne[0];
  461. const int64_t n_head = cur->ne[1];
  462. const int64_t n_pos = cur->ne[2];
  463. // for example, if we have cur tensor of shape (n_dim=8, n_head, n_pos)
  464. // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
  465. // first half of cur will use 1e-0, 1e-2 (even)
  466. // second half of cur will use 1e-1, 1e-3 (odd)
  467. // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
  468. // ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
  469. // then for the second half, we use freq_scale to shift the inv_freq
  470. // ^ why? replace (2i) with (2i+1) in the above equation
  471. const float freq_scale_odd = std::pow(freq_base, (float)-2/n_dim);
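  // worked example: with n_dim = 8 and freq_base = 10000, a rope over n_dim/2 = 4 dims uses
  // inv_freq = 10000^(-2j/4) = {1e0, 1e-2}, i.e. exactly the even frequencies of the full
  // 8-dim rope {1e0, 1e-1, 1e-2, 1e-3}; scaling them by freq_scale_odd = 10000^(-2/8) = 1e-1
  // produces {1e-1, 1e-3}, the odd ones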
  472. // first half
  473. ggml_tensor * first;
  474. {
  475. first = ggml_view_3d(ctx0, cur,
  476. n_dim/2, n_head, n_pos,
  477. ggml_row_size(cur->type, n_dim),
  478. ggml_row_size(cur->type, n_dim*n_head),
  479. 0);
  480. first = ggml_rope_ext(
  481. ctx0,
  482. first,
  483. pos_h, // positions
  484. nullptr, // freq factors
  485. n_dim/2, // n_dims
  486. 0, 0, freq_base,
  487. 1.0f, 0.0f, 1.0f, 0.0f, 0.0f
  488. );
  489. }
  490. // second half
  491. ggml_tensor * second;
  492. {
  493. second = ggml_view_3d(ctx0, cur,
  494. n_dim/2, n_head, n_pos,
  495. ggml_row_size(cur->type, n_dim),
  496. ggml_row_size(cur->type, n_dim*n_head),
  497. n_dim/2 * ggml_element_size(cur));
  498. second = ggml_cont(ctx0, second); // copy, because ggml_rope doesn't play well with non-contiguous tensors
  499. second = ggml_rope_ext(
  500. ctx0,
  501. second,
  502. pos_w, // positions
  503. nullptr, // freq factors
  504. n_dim/2, // n_dims
  505. 0, 0, freq_base,
  506. freq_scale_odd,
  507. 0.0f, 1.0f, 0.0f, 0.0f
  508. );
  509. }
  510. cur = ggml_concat(ctx0, first, second, 0);
  511. return cur;
  512. }
  513. static ggml_cgraph * clip_image_build_graph_pixtral(clip_ctx * ctx, const clip_image_f32 & img) {
  514. const auto & model = ctx->vision_model;
  515. const auto & hparams = model.hparams;
  516. GGML_ASSERT(ctx->proj_type == PROJECTOR_TYPE_PIXTRAL);
  517. int image_size_width = img.nx;
  518. int image_size_height = img.ny;
  519. const int patch_size = hparams.patch_size;
  520. const int n_patches_x = image_size_width / patch_size;
  521. const int n_patches_y = image_size_height / patch_size;
  522. const int num_patches = n_patches_x * n_patches_y;
  523. const int hidden_size = hparams.hidden_size;
  524. const int n_head = hparams.n_head;
  525. const int d_head = hidden_size / n_head;
  526. const int n_layer = hparams.n_layer;
  527. const float eps = hparams.eps;
  528. struct ggml_init_params params = {
  529. /*.mem_size =*/ ctx->buf_compute_meta.size(),
  530. /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
  531. /*.no_alloc =*/ true,
  532. };
  533. ggml_context_ptr ctx0_ptr(ggml_init(params));
  534. auto ctx0 = ctx0_ptr.get();
  535. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  536. // input raw
  537. struct ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3);
  538. ggml_set_name(inp_raw, "inp_raw");
  539. ggml_set_input(inp_raw);
  540. // 2D input positions
  541. struct ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
  542. ggml_set_name(pos_h, "pos_h");
  543. ggml_set_input(pos_h);
  544. struct ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
  545. ggml_set_name(pos_w, "pos_w");
  546. ggml_set_input(pos_w);
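  // pos_h holds the row index and pos_w the column index of each patch; build_rope_2d applies
  // pos_h to the first half of each head dimension and pos_w to the second half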
  547. struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  548. inp = ggml_reshape_2d(ctx0, inp, num_patches, hidden_size);
  549. inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
  550. struct ggml_tensor * embeddings = inp;
  551. // pre-layer norm
  552. embeddings = ggml_mul(ctx0, ggml_rms_norm(ctx0, embeddings, eps), model.pre_ln_w);
  553. // loop over layers
  554. for (int il = 0; il < n_layer; il++) {
  555. struct ggml_tensor * cur = embeddings;
  556. // pre-attention norm
  557. cur = ggml_mul(ctx0, ggml_rms_norm(ctx0, cur, eps), model.layers[il].ln_1_w);
  558. // self-attention
  559. {
  560. struct ggml_tensor * Q = ggml_mul_mat(ctx0, model.layers[il].q_w, cur);
  561. Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_patches);
  562. Q = build_rope_2d(ctx0, Q, pos_h, pos_w, hparams.rope_theta);
  563. Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
  564. struct ggml_tensor * K = ggml_mul_mat(ctx0, model.layers[il].k_w, cur);
  565. K = ggml_reshape_3d(ctx0, K, d_head, n_head, num_patches);
  566. K = build_rope_2d(ctx0, K, pos_h, pos_w, hparams.rope_theta);
  567. K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
  568. struct ggml_tensor * V = ggml_mul_mat(ctx0, model.layers[il].v_w, cur);
  569. V = ggml_reshape_3d(ctx0, V, d_head, n_head, num_patches);
  570. V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
  571. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  572. KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
  573. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
  574. KQV = ggml_reshape_3d(ctx0, KQV, d_head, num_patches, n_head);
  575. KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  576. cur = ggml_cont_2d(ctx0, KQV, hidden_size, num_patches);
  577. cur = ggml_mul_mat(ctx0, model.layers[il].o_w, cur);
  578. }
  579. // re-add the layer input, i.e. the residual connection
  580. cur = ggml_add(ctx0, cur, embeddings);
  581. embeddings = cur; // embeddings = residual, cur = hidden_states
  582. // pre-ffn norm
  583. cur = ggml_mul(ctx0, ggml_rms_norm(ctx0, cur, eps), model.layers[il].ln_2_w);
  584. // feed-forward
  585. {
  586. ggml_tensor * gate_proj = ggml_mul_mat(ctx0, model.layers[il].ff_gate_w, cur);
  587. ggml_tensor * up_proj = ggml_mul_mat(ctx0, model.layers[il].ff_up_w, cur);
  588. gate_proj = ggml_silu(ctx0, gate_proj); // pixtral uses silu
  589. cur = ggml_mul(ctx0, up_proj, gate_proj);
  590. cur = ggml_mul_mat(ctx0, model.layers[il].ff_down_w, cur);
  591. }
  592. // residual 2
  593. cur = ggml_add(ctx0, embeddings, cur);
  594. embeddings = cur;
  595. }
  596. // LlavaMultiModalProjector (with GELU activation)
  597. {
  598. embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
  599. embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);
  600. embeddings = ggml_gelu(ctx0, embeddings);
  601. embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
  602. embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
  603. }
  604. // arrangement of the [IMG_BREAK] token
  605. {
  606. // not efficient, but works
  607. // the trick is to view the embeddings as a 3D tensor with shape [hidden_size, n_patches_per_row, n_rows]
  608. // and then concatenate the [IMG_BREAK] token to the end of each row, aka n_patches_per_row dimension
  609. // after the concatenation, we have a tensor with shape [hidden_size, n_patches_per_row + 1, n_rows]
  610. const int n_embd_text = embeddings->ne[0];
  611. const int n_tokens_output = num_patches + n_patches_y - 1; // one [IMG_BREAK] per row, except the last row
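  // e.g. with n_patches_x = 4 and n_patches_y = 3: the 12 patch embeddings are viewed as
  // [n_embd_text, 4, 3], one [IMG_BREAK] column is concatenated to give [n_embd_text, 5, 3],
  // and the final 2D view keeps the first n_tokens_output = 12 + 3 - 1 = 14 rows, dropping
  // the break that would follow the last row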
  612. ggml_tensor * cur = ggml_reshape_3d(ctx0, embeddings, n_embd_text, n_patches_x, n_patches_y);
  613. ggml_tensor * tok = ggml_new_tensor_3d(ctx0, embeddings->type, n_embd_text, 1, n_patches_y);
  614. tok = ggml_scale(ctx0, tok, 0.0); // clear the tensor
  615. tok = ggml_add(ctx0, tok, model.token_embd_img_break);
  616. cur = ggml_concat(ctx0, cur, tok, 1);
  617. embeddings = ggml_view_2d(ctx0, cur,
  618. n_embd_text, n_tokens_output,
  619. ggml_row_size(cur->type, n_embd_text), 0);
  620. }
  621. // build the graph
  622. ggml_build_forward_expand(gf, embeddings);
  623. return gf;
  624. }
  625. static ggml_cgraph * clip_image_build_graph_qwen25vl(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
  626. const auto & model = ctx->vision_model;
  627. const auto & hparams = model.hparams;
  628. const int image_size_width = imgs.entries[0]->nx;
  629. const int image_size_height = imgs.entries[0]->ny;
  630. const bool use_window_attn = hparams.n_wa_pattern > 0;
  631. const int n_wa_pattern = hparams.n_wa_pattern;
  632. const int patch_size = hparams.patch_size;
  633. const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
  634. const int patches_w = image_size_width / patch_size;
  635. const int patches_h = image_size_height / patch_size;
  636. const int num_positions = num_patches + (model.class_embedding ? 1 : 0);
  637. const int num_position_ids = num_positions * 4; // m-rope requires 4 dim per position
  638. const int hidden_size = hparams.hidden_size;
  639. const int n_head = hparams.n_head;
  640. const int d_head = hidden_size / n_head;
  641. const int n_layer = hparams.n_layer;
  642. const float eps = hparams.eps;
  643. int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
  644. const int batch_size = imgs.entries.size();
  645. GGML_ASSERT(batch_size == 1);
  646. struct ggml_init_params params = {
  647. /*.mem_size =*/ ctx->buf_compute_meta.size(),
  648. /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
  649. /*.no_alloc =*/ true,
  650. };
  651. ggml_context_ptr ctx0_ptr(ggml_init(params));
  652. auto ctx0 = ctx0_ptr.get();
  653. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  654. struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3, batch_size);
  655. ggml_set_name(inp_raw, "inp_raw");
  656. ggml_set_input(inp_raw);
  657. struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  658. GGML_ASSERT(image_size_width % (patch_size * 2) == 0);
  659. GGML_ASSERT(image_size_height % (patch_size * 2) == 0);
  660. auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  661. inp = ggml_add(ctx0, inp, inp_1);
  662. inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 2, 0, 3)); // [w, h, c, b] -> [c, w, h, b]
  663. inp = ggml_reshape_4d(
  664. ctx0, inp,
  665. hidden_size * 2, patches_w / 2, patches_h, batch_size);
  666. inp = ggml_reshape_4d(
  667. ctx0, inp,
  668. hidden_size * 2, patches_w / 2, 2, batch_size * (patches_h / 2));
  669. inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 0, 2, 1, 3));
  670. inp = ggml_reshape_3d(
  671. ctx0, inp,
  672. hidden_size, patches_w * patches_h, batch_size);
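  // after this rearrangement every 4 consecutive rows hold a 2x2 block of neighbouring patches;
  // this 4-row group is the merge unit used later by the window indices and the projector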
  673. if (model.patch_bias) {
  674. // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
  675. inp = ggml_add(ctx0, inp, model.patch_bias);
  676. }
  677. struct ggml_tensor * embeddings = inp;
  678. struct ggml_tensor * window_mask = nullptr;
  679. struct ggml_tensor * window_idx = nullptr;
  680. struct ggml_tensor * inv_window_idx = nullptr;
  681. struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
  682. ggml_set_name(positions, "positions");
  683. ggml_set_input(positions);
  684. // pre-layernorm
  685. if (model.pre_ln_w) {
  686. embeddings = ggml_rms_norm(ctx0, embeddings, eps);
  687. ggml_set_name(embeddings, "pre_ln");
  688. embeddings = ggml_mul(ctx0, embeddings, model.pre_ln_w);
  689. }
  690. if (use_window_attn) {
  691. // handle window attention inputs
  692. inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions / 4);
  693. ggml_set_name(inv_window_idx, "inv_window_idx");
  694. ggml_set_input(inv_window_idx);
  695. // mask for window attention
  696. window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, num_positions, num_positions);
  697. ggml_set_name(window_mask, "window_mask");
  698. ggml_set_input(window_mask);
  699. // embeddings shape: [hidden_size, patches_w * patches_h, batch_size]
  700. GGML_ASSERT(batch_size == 1);
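  // group the embeddings into those 4-row merge units so that ggml_get_rows can reorder whole
  // units into window order according to inv_window_idx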
  701. embeddings = ggml_reshape_2d(ctx0, embeddings, hidden_size * 4, patches_w * patches_h * batch_size / 4);
  702. embeddings = ggml_get_rows(ctx0, embeddings, inv_window_idx);
  703. embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size, patches_w * patches_h, batch_size);
  704. }
  705. // loop over layers
  706. for (int il = 0; il < n_layer; il++) {
  707. struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states
  708. // rmsnorm1
  709. cur = ggml_rms_norm(ctx0, cur, eps);
  710. cur = ggml_mul(ctx0, cur, model.layers[il].ln_1_w);
  711. // self-attention
  712. {
  713. struct ggml_tensor * Q =
  714. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b);
  715. Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size);
  716. Q = ggml_rope_multi(
  717. ctx0, Q, positions, nullptr,
  718. d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
  719. Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
  720. Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size);
  721. struct ggml_tensor * K =
  722. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b);
  723. K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
  724. K = ggml_rope_multi(
  725. ctx0, K, positions, nullptr,
  726. d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
  727. K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
  728. K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
  729. struct ggml_tensor * V =
  730. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b);
  731. V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
  732. V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
  733. V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);
  734. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
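  // every n_wa_pattern-th layer attends globally; all other layers are restricted to their
  // local window through window_mask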
  735. const bool full_attn = use_window_attn ? (il + 1) % n_wa_pattern == 0 : true;
  736. if (full_attn) {
  737. KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
  738. } else {
  739. KQ = ggml_soft_max_ext(ctx0, KQ, window_mask, 1.0f / sqrtf((float)d_head), 0.0f);
  740. }
  741. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
  742. KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size);
  743. KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  744. cur = ggml_cont_3d(ctx0, KQV, hidden_size, num_positions, batch_size);
  745. }
  746. // attention output
  747. cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);
  748. // re-add the layer input, i.e. the residual connection
  749. cur = ggml_add(ctx0, cur, embeddings);
  750. embeddings = cur; // embeddings = residual, cur = hidden_states
  751. // rms norm2
  752. cur = ggml_rms_norm(ctx0, cur, eps);
  753. cur = ggml_mul(ctx0, cur, model.layers[il].ln_2_w);
  754. // mlp
  755. // ffn_up
  756. auto cur_up = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
  757. cur_up = ggml_add(ctx0, cur_up, model.layers[il].ff_o_b);
  758. auto cur_gate = ggml_mul_mat(ctx0, model.layers[il].ff_g_w, cur);
  759. cur_gate = ggml_add(ctx0, cur_gate, model.layers[il].ff_g_b);
  760. // TODO : only 2 of these 3 are actually used, should we remove one of them?
  761. if (ctx->use_gelu) {
  762. cur_gate = ggml_gelu_inplace(ctx0, cur_gate);
  763. } else if (ctx->use_silu) {
  764. cur_gate = ggml_silu_inplace(ctx0, cur_gate);
  765. } else {
  766. cur_gate = ggml_gelu_quick_inplace(ctx0, cur_gate);
  767. }
  768. cur = ggml_mul(ctx0, cur_gate, cur_up);
  769. // ffn_down
  770. cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
  771. cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
  772. // residual 2
  773. cur = ggml_add(ctx0, embeddings, cur);
  774. embeddings = cur;
  775. }
  776. // post-layernorm
  777. if (model.post_ln_w) {
  778. embeddings = ggml_rms_norm(ctx0, embeddings, eps);
  779. ggml_set_name(embeddings, "post_ln");
  780. embeddings = ggml_mul(ctx0, embeddings, model.post_ln_w);
  781. }
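  // merger: concatenate each 4-row merge unit (one 2x2 patch block) into a single vector, then
  // project it with the two-layer MLP (mm_0 -> GELU -> mm_1) below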
  782. embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size * 4, num_positions / 4, batch_size);
  783. embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
  784. embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
  785. // GELU activation
  786. embeddings = ggml_gelu(ctx0, embeddings);
  787. // Second linear layer
  788. embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
  789. embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);
  790. if (use_window_attn) {
  791. window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions / 4);
  792. ggml_set_name(window_idx, "window_idx");
  793. ggml_set_input(window_idx);
  794. // embeddings shape: [hidden_size, patches_w * patches_h, batch_size]
  795. GGML_ASSERT(batch_size == 1);
  796. embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, patches_w * patches_h / 4);
  797. embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
  798. embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, patches_w * patches_h / 4, batch_size);
  799. }
  800. // build the graph
  801. ggml_build_forward_expand(gf, embeddings);
  802. return gf;
  803. }
  804. static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_image_f32_batch & imgs, struct clip_image_size load_image_size, bool is_inf = false) {
  805. const auto & model = ctx->vision_model;
  806. const auto & hparams = model.hparams;
  807. const int image_size = hparams.image_size;
  808. int image_size_width = image_size;
  809. int image_size_height = image_size;
  810. if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
  811. LOG_DBG("%s: %d %d\n", __func__, load_image_size.width, load_image_size.height);
  812. image_size_width = load_image_size.width;
  813. image_size_height = load_image_size.height;
  814. if (is_inf) {
  815. image_size_width = imgs.entries[0]->nx;
  816. image_size_height = imgs.entries[0]->ny;
  817. }
  818. }
  819. else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL) {
  820. // use the image's native resolution when the image is available
  821. if (is_inf) {
  822. // if (imgs->data->nx && imgs->data->ny) {
  823. image_size_width = imgs.entries[0]->nx;
  824. image_size_height = imgs.entries[0]->ny;
  825. }
  826. }
  827. const int patch_size = hparams.patch_size;
  828. const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
  829. const int patches_w = image_size_width / patch_size;
  830. const int patches_h = image_size_height / patch_size;
  831. const int num_positions = num_patches + (model.class_embedding ? 1 : 0);
  832. const int num_position_ids = ctx->proj_type == PROJECTOR_TYPE_QWEN2VL ? num_positions * 4 : num_positions;
  833. const int hidden_size = hparams.hidden_size;
  834. const int n_head = hparams.n_head;
  835. const int d_head = hidden_size / n_head;
  836. const float eps = hparams.eps;
  837. int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
  838. const int batch_size = imgs.entries.size();
  839. if (ctx->has_llava_projector
  840. || ctx->proj_type == PROJECTOR_TYPE_MINICPMV
  841. || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
  842. GGML_ASSERT(batch_size == 1);
  843. }
  844. struct ggml_init_params params = {
  845. /*.mem_size =*/ ctx->buf_compute_meta.size(),
  846. /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
  847. /*.no_alloc =*/ true,
  848. };
  849. ggml_context_ptr ctx0_ptr(ggml_init(params));
  850. auto ctx0 = ctx0_ptr.get();
  851. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  852. struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3, batch_size);
  853. ggml_set_name(inp_raw, "inp_raw");
  854. ggml_set_input(inp_raw);
  855. struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  856. if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL) {
  857. GGML_ASSERT(image_size_width % (patch_size * 2) == 0);
  858. GGML_ASSERT(image_size_height % (patch_size * 2) == 0);
  859. auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  860. inp = ggml_add(ctx0, inp, inp_1);
  861. inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 2, 0, 3)); // [w, h, c, b] -> [c, w, h, b]
  862. inp = ggml_reshape_4d(
  863. ctx0, inp,
  864. hidden_size * 2, patches_w / 2, patches_h, batch_size);
  865. inp = ggml_reshape_4d(
  866. ctx0, inp,
  867. hidden_size * 2, patches_w / 2, 2, batch_size * (patches_h / 2));
  868. inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 0, 2, 1, 3));
  869. inp = ggml_reshape_3d(
  870. ctx0, inp,
  871. hidden_size, patches_w * patches_h, batch_size);
  872. }
  873. else {
  874. inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
  875. inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));
  876. }
  877. if (model.patch_bias) {
  878. // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
  879. inp = ggml_add(ctx0, inp, model.patch_bias);
  880. }
  881. struct ggml_tensor * embeddings = inp;
  882. struct ggml_tensor * pos_embed = nullptr;
  883. // concat class_embeddings and patch_embeddings
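  // the two ggml_acc calls below write the class embedding into row 0 and the patch embeddings
  // into the following rows (the offset equals one row, model.class_embedding->nb[1])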
  884. if (model.class_embedding) {
  885. embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
  886. embeddings = ggml_scale(ctx0, embeddings, 0.0f); // set to all zeros
  887. embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
  888. embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
  889. embeddings = ggml_acc(ctx0, embeddings, inp,
  890. embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
  891. }
  892. struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
  893. ggml_set_name(positions, "positions");
  894. ggml_set_input(positions);
  895. if (ctx->proj_type != PROJECTOR_TYPE_QWEN2VL) { // qwen2vl does NOT use learned position embeddings
  896. embeddings =
  897. ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));
  898. }
  899. if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
  900. int pos_w = image_size_width/patch_size;
  901. int pos_h = image_size_height/patch_size;
  902. int n_output_dim = clip_n_mmproj_embd(ctx);
  903. pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_output_dim, pos_w * pos_h, 1);
  904. ggml_set_name(pos_embed, "pos_embed");
  905. ggml_set_input(pos_embed);
  906. }
  907. // pre-layernorm
  908. if (model.pre_ln_w) {
  909. embeddings = ggml_norm(ctx0, embeddings, eps);
  910. ggml_set_name(embeddings, "pre_ln");
  911. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
  912. }
  913. std::vector<struct ggml_tensor *> embedding_stack;
  914. const auto & vision_feature_layer = hparams.vision_feature_layer;
  915. // loop over layers
  916. for (int il = 0; il < ctx->max_feature_layer; il++) {
  917. struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states
  918. // If this is an embedding feature layer, save the output.
  919. // NOTE: 0 index here refers to the input to the encoder.
  920. if (vision_feature_layer.find(il) != vision_feature_layer.end()) {
  921. embedding_stack.push_back(embeddings);
  922. }
  923. //const size_t nb_q_w = model.layers[il].q_w->nb[0];
  924. // layernorm1
  925. {
  926. cur = ggml_norm(ctx0, cur, eps);
  927. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w),
  928. model.layers[il].ln_1_b);
  929. }
  930. // self-attention
  931. {
  932. struct ggml_tensor * Q =
  933. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b);
  934. Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size);
  935. if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL) {
  936. Q = ggml_rope_multi(
  937. ctx0, Q, positions, nullptr,
  938. d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
  939. }
  940. Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
  941. Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size);
  942. struct ggml_tensor * K =
  943. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b);
  944. K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
  945. if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL) {
  946. K = ggml_rope_multi(
  947. ctx0, K, positions, nullptr,
  948. d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
  949. }
  950. K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
  951. K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
  952. struct ggml_tensor * V =
  953. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b);
  954. V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
  955. V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
  956. V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);
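// ggml_mul_mat(K, Q) yields the attention scores (effectively Q*K^T) with shape [num_positions, num_positions, n_head * batch_size];
// the 1/sqrt(d_head) scaling is folded into the softmax below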
  957. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  958. KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
  959. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
  960. KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size);
  961. KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  962. cur = ggml_cont_3d(ctx0, KQV, hidden_size, num_positions, batch_size);
  963. }
  964. // attention output
  965. cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);
966. // re-add the layer input, i.e., the residual connection
  967. cur = ggml_add(ctx0, cur, embeddings);
  968. embeddings = cur; // embeddings = residual, cur = hidden_states
  969. // layernorm2
  970. {
  971. cur = ggml_norm(ctx0, cur, eps);
  972. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);
  973. }
  974. cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
  975. cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
  976. if (ctx->use_gelu) {
  977. cur = ggml_gelu_inplace(ctx0, cur);
  978. } else if (ctx->use_silu) {
  979. cur = ggml_silu_inplace(ctx0, cur);
  980. } else {
  981. cur = ggml_gelu_quick_inplace(ctx0, cur);
  982. }
  983. cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
  984. cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);
  985. // residual 2
  986. cur = ggml_add(ctx0, embeddings, cur);
  987. embeddings = cur;
  988. }
  989. // post-layernorm
  990. if (model.post_ln_w) {
  991. embeddings = ggml_norm(ctx0, embeddings, eps);
  992. ggml_set_name(embeddings, "post_ln");
  993. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
  994. }
  995. // final layer is a vision feature layer
  996. if (vision_feature_layer.find(ctx->max_feature_layer) != vision_feature_layer.end()) {
  997. embedding_stack.push_back(embeddings);
  998. }
  999. // If feature layers are explicitly set, stack them (if we have multiple)
  1000. if (!embedding_stack.empty()) {
  1001. embeddings = embedding_stack[0];
  1002. for (size_t i = 1; i < embedding_stack.size(); i++) {
  1003. embeddings = ggml_concat(ctx0, embeddings, embedding_stack[i], 0);
  1004. }
  1005. }
  1006. // llava projector
  1007. if (ctx->has_llava_projector) {
  1008. embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
  1009. struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
  1010. ggml_set_name(patches, "patches");
  1011. ggml_set_input(patches);
  1012. // shape [1, 576, 1024]
  1013. // ne is whcn, ne = [1024, 576, 1, 1]
  1014. embeddings = ggml_get_rows(ctx0, embeddings, patches);
  1015. // print_tensor_info(embeddings, "embeddings");
  1016. // llava projector
  1017. if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
  1018. embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
  1019. embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
  1020. embeddings = ggml_gelu(ctx0, embeddings);
  1021. if (model.mm_2_w) {
  1022. embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
  1023. embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
  1024. }
  1025. }
  1026. else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
  1027. embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
  1028. embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
  1029. // ggml_tensor_printf(embeddings, "mm_0_w",0,true,false);
  1030. // First LayerNorm
  1031. embeddings = ggml_norm(ctx0, embeddings, eps);
  1032. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),
  1033. model.mm_1_b);
  1034. // GELU activation
  1035. embeddings = ggml_gelu(ctx0, embeddings);
  1036. // Second linear layer
  1037. embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
  1038. embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);
  1039. // Second LayerNorm
  1040. embeddings = ggml_norm(ctx0, embeddings, eps);
  1041. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
  1042. model.mm_4_b);
  1043. }
  1044. else if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
  1045. // MobileVLM projector
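// rough structure: 2-layer MLP, then two depthwise-conv blocks with a squeeze-and-excitation branch (hardswish + hardsigmoid);
// block 2 downsamples 24x24 -> 12x12, giving 144 output tokens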
  1046. int n_patch = 24;
  1047. struct ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
  1048. mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
  1049. mlp_1 = ggml_gelu(ctx0, mlp_1);
  1050. struct ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
  1051. mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
  1052. // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]
  1053. // block 1
  1054. struct ggml_tensor * block_1 = nullptr;
  1055. {
  1056. // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
  1057. mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
  1058. mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
  1059. // stride = 1, padding = 1, bias is nullptr
  1060. block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);
  1061. // layer norm
1062. // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  1063. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
  1064. // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
  1065. block_1 = ggml_norm(ctx0, block_1, eps);
  1066. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
  1067. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
  1068. // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  1069. // hardswish
  1070. struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
  1071. block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
  1072. // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
  1073. // pointwise conv
  1074. block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
  1075. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
  1076. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
  1077. block_1 = ggml_relu(ctx0, block_1);
  1078. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
  1079. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
  1080. block_1 = ggml_hardsigmoid(ctx0, block_1);
  1081. // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
  1082. block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
  1083. block_1 = ggml_mul(ctx0, block_1_hw, block_1);
  1084. int w = block_1->ne[0], h = block_1->ne[1];
  1085. block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
  1086. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
  1087. // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
  1088. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
  1089. block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
  1090. // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
  1091. block_1 = ggml_norm(ctx0, block_1, eps);
  1092. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
  1093. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
1094. // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  1095. // residual
  1096. block_1 = ggml_add(ctx0, mlp_3, block_1);
  1097. }
  1098. // block_2
  1099. {
  1100. // stride = 2
  1101. block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
  1102. // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
  1103. // layer norm
  1104. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
  1105. // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
  1106. block_1 = ggml_norm(ctx0, block_1, eps);
  1107. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
  1108. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
  1109. // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
  1110. // hardswish
  1111. struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
1112. // not sure the parameters are right for global average pooling
  1113. block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
  1114. // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
  1115. // pointwise conv
  1116. block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
  1117. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
  1118. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
  1119. block_1 = ggml_relu(ctx0, block_1);
  1120. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
  1121. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
  1122. block_1 = ggml_hardsigmoid(ctx0, block_1);
  1123. // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
  1124. block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
  1125. block_1 = ggml_mul(ctx0, block_1_hw, block_1);
  1126. int w = block_1->ne[0], h = block_1->ne[1];
  1127. block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
  1128. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
  1129. // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
  1130. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
  1131. block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
  1132. // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
  1133. block_1 = ggml_norm(ctx0, block_1, eps);
  1134. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
  1135. block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
  1136. // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
  1137. }
  1138. embeddings = block_1;
  1139. }
  1140. else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2)
  1141. {
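// MobileVLM v2 (LDPv2) projector: 2-layer MLP, 2x2 average-pool downsample, then a depthwise-conv positional encoding (PEG) added back as a residual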
  1142. int n_patch = 24;
  1143. struct ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
  1144. mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
  1145. mlp_0 = ggml_gelu(ctx0, mlp_0);
  1146. struct ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
  1147. mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
  1148. // mlp_2 ne = [2048, 576, 1, 1]
1149. // 2x2 average pool, stride 2
  1150. mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
  1151. // mlp_2 ne = [576, 2048, 1, 1]
  1152. mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
  1153. // mlp_2 ne [24, 24, 2048, 1]
  1154. mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
  1155. // weight ne = [3, 3, 2048, 1]
  1156. struct ggml_tensor * peg_0 = ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
  1157. peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
  1158. peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
  1159. mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
  1160. peg_0 = ggml_add(ctx0, peg_0, mlp_2);
  1161. peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
  1162. embeddings = peg_0;
  1163. }
  1164. else {
  1165. GGML_ABORT("fatal error");
  1166. }
  1167. }
  1168. // minicpmv projector
  1169. else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
  1170. struct ggml_tensor * q = model.mm_model_query;
  1171. { // layernorm
  1172. q = ggml_norm(ctx0, q, eps);
  1173. q = ggml_add(ctx0, ggml_mul(ctx0, q, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
  1174. }
  1175. struct ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);
  1176. { // layernorm
  1177. v = ggml_norm(ctx0, v, eps);
  1178. v = ggml_add(ctx0, ggml_mul(ctx0, v, model.mm_model_ln_kv_w), model.mm_model_ln_kv_b);
  1179. }
  1180. struct ggml_tensor * k;
  1181. { // position
  1182. // q = ggml_add(ctx0, q, model.mm_model_pos_embed);
  1183. k = ggml_add(ctx0, v, pos_embed);
  1184. }
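// resampler: cross-attention from a fixed set of learned queries (mm_model_query) to the image embeddings, with the 2D position embedding added to the keys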
  1185. { // attention
  1186. int hidden_size = clip_n_mmproj_embd(ctx);
  1187. const int d_head = 128;
  1188. int n_head = hidden_size/d_head;
  1189. int num_query = 96;
  1190. if (ctx->minicpmv_version == 2) {
  1191. num_query = 96;
  1192. }
  1193. else if (ctx->minicpmv_version == 3) {
  1194. num_query = 64;
  1195. }
  1196. else if (ctx->minicpmv_version == 4) {
  1197. num_query = 64;
  1198. }
  1199. struct ggml_tensor * Q = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q), model.mm_model_attn_q_b);
  1200. struct ggml_tensor * K = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k), model.mm_model_attn_k_b);
  1201. struct ggml_tensor * V = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v), model.mm_model_attn_v_b);
  1202. // permute
  1203. Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_query, batch_size);
  1204. Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
  1205. Q = ggml_reshape_3d(ctx0, Q, d_head, num_query, n_head * batch_size);
  1206. K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
  1207. K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
  1208. K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
  1209. V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
  1210. V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
  1211. V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);
  1212. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  1213. KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
  1214. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
  1215. KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_query, n_head, batch_size);
  1216. KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  1217. KQV = ggml_cont_3d(ctx0, KQV, hidden_size, num_query, batch_size);
  1218. embeddings = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_o_w, KQV), model.mm_model_attn_o_b);
  1219. }
  1220. { // layernorm
  1221. embeddings = ggml_norm(ctx0, embeddings, eps);
  1222. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_post_w), model.mm_model_ln_post_b);
  1223. }
  1224. embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);
  1225. }
  1226. // glm projector
  1227. else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
  1228. size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
1229. embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
1230. embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
1231. embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
1232. embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0] * embeddings->ne[1], embeddings->ne[2], batch_size);
1233. embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
  1234. embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);
  1235. // GLU
  1236. {
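// gated MLP, roughly: h = gelu(norm(mlp_0 * x)); out = mlp_3 * (silu(mlp_2 * h) * (mlp_1 * h))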
  1237. embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
  1238. embeddings = ggml_norm(ctx0, embeddings, eps);
  1239. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
  1240. embeddings = ggml_gelu_inplace(ctx0, embeddings);
  1241. struct ggml_tensor * x = embeddings;
  1242. embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
1243. x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, x);
1244. embeddings = ggml_silu_inplace(ctx0, embeddings);
1245. embeddings = ggml_mul(ctx0, embeddings, x);
  1246. embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
  1247. }
  1248. }
  1249. else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL) {
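// merge every 4 patch embeddings into one token (hidden_size * 4), then project with a 2-layer MLP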
  1250. embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size * 4, num_positions / 4, batch_size);
  1251. embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
  1252. embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
  1253. // GELU activation
  1254. embeddings = ggml_gelu(ctx0, embeddings);
  1255. // Second linear layer
  1256. embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
  1257. embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);
  1258. }
  1259. // build the graph
  1260. ggml_build_forward_expand(gf, embeddings);
  1261. return gf;
  1262. }
  1263. static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs, struct clip_image_size load_image_size, bool is_inf = false) {
  1264. ggml_cgraph * res;
  1265. switch (ctx->proj_type) {
  1266. case PROJECTOR_TYPE_GEMMA3:
  1267. case PROJECTOR_TYPE_IDEFICS3:
  1268. {
  1269. GGML_ASSERT(imgs.entries.size() == 1);
  1270. res = clip_image_build_graph_siglip(ctx, *imgs.entries[0]);
  1271. } break;
  1272. case PROJECTOR_TYPE_PIXTRAL:
  1273. {
  1274. GGML_ASSERT(imgs.entries.size() == 1);
  1275. res = clip_image_build_graph_pixtral(ctx, *imgs.entries[0]);
  1276. } break;
  1277. case PROJECTOR_TYPE_QWEN25VL:
  1278. {
  1279. res = clip_image_build_graph_qwen25vl(ctx, imgs);
  1280. } break;
  1281. default:
  1282. {
  1283. // TODO: we should have one build_* function per model
  1284. res = clip_image_build_graph_legacy(ctx, imgs, load_image_size, is_inf);
  1285. } break;
  1286. }
  1287. return res;
  1288. }
  1289. struct clip_model_loader {
  1290. ggml_context_ptr ctx_meta;
  1291. gguf_context_ptr ctx_gguf;
  1292. clip_ctx & ctx_clip;
  1293. std::string fname;
1294. size_t model_size = 0; // in bytes
  1295. // TODO @ngxson : we should not pass clip_ctx here, it should be clip_vision_model
  1296. clip_model_loader(const char * fname, clip_ctx & ctx_clip) : ctx_clip(ctx_clip), fname(fname) {
  1297. struct ggml_context * meta = nullptr;
  1298. struct gguf_init_params params = {
  1299. /*.no_alloc = */ true,
  1300. /*.ctx = */ &meta,
  1301. };
  1302. ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
  1303. if (!ctx_gguf.get()) {
  1304. throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
  1305. }
  1306. ctx_meta.reset(meta);
  1307. const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
  1308. // print gguf info
  1309. {
  1310. std::string name;
  1311. get_string(KEY_NAME, name, false);
  1312. std::string description;
  1313. get_string(KEY_DESCRIPTION, description, false);
  1314. LOG_INF("%s: model name: %s\n", __func__, name.c_str());
  1315. LOG_INF("%s: description: %s\n", __func__, description.c_str());
  1316. LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx_gguf.get()));
  1317. LOG_INF("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
  1318. LOG_INF("%s: n_tensors: %d\n", __func__, n_tensors);
  1319. LOG_INF("%s: n_kv: %d\n", __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
  1320. LOG_INF("\n");
  1321. }
  1322. // tensors
  1323. {
  1324. for (int i = 0; i < n_tensors; ++i) {
  1325. const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
  1326. const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
  1327. enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
  1328. struct ggml_tensor * cur = ggml_get_tensor(meta, name);
  1329. size_t tensor_size = ggml_nbytes(cur);
  1330. model_size += tensor_size;
  1331. LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
  1332. __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
  1333. }
  1334. }
  1335. }
  1336. void load_hparams() {
  1337. auto & hparams = ctx_clip.vision_model.hparams;
  1338. // projector type
  1339. std::string proj_type;
  1340. {
  1341. get_string(KEY_PROJ_TYPE, proj_type, false);
  1342. if (!proj_type.empty()) {
  1343. ctx_clip.proj_type = clip_projector_type_from_string(proj_type);
  1344. }
  1345. if (ctx_clip.proj_type == PROJECTOR_TYPE_UNKNOWN) {
  1346. throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
  1347. }
  1348. }
  1349. // other hparams
  1350. {
  1351. get_i32(KEY_MINICPMV_VERSION, ctx_clip.minicpmv_version, false);
  1352. get_bool(KEY_USE_GELU, ctx_clip.use_gelu, false);
  1353. get_bool(KEY_USE_SILU, ctx_clip.use_silu, false);
  1354. get_u32(KEY_N_EMBD, hparams.hidden_size);
  1355. get_u32(KEY_N_HEAD, hparams.n_head);
  1356. get_u32(KEY_N_FF, hparams.n_intermediate);
  1357. get_u32(KEY_N_BLOCK, hparams.n_layer);
  1358. get_u32(KEY_PROJ_DIM, hparams.projection_dim);
  1359. get_f32(KEY_LAYER_NORM_EPS, hparams.eps);
  1360. get_u32(KEY_IMAGE_SIZE, hparams.image_size);
  1361. get_u32(KEY_PATCH_SIZE, hparams.patch_size);
  1362. get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
  1363. get_arr_int(KEY_IMAGE_GRID_PINPOINTS, hparams.image_grid_pinpoints, false);
  1364. ctx_clip.has_llava_projector = ctx_clip.proj_type == PROJECTOR_TYPE_MLP
  1365. || ctx_clip.proj_type == PROJECTOR_TYPE_MLP_NORM
  1366. || ctx_clip.proj_type == PROJECTOR_TYPE_LDP
  1367. || ctx_clip.proj_type == PROJECTOR_TYPE_LDPV2;
  1368. {
  1369. std::string mm_patch_merge_type;
  1370. get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
  1371. if (mm_patch_merge_type == "spatial_unpad") {
  1372. hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
  1373. }
  1374. }
  1375. {
  1376. int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
  1377. int idx_std = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
  1378. GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
  1379. GGML_ASSERT(idx_std >= 0 && "image_std not found");
  1380. const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
  1381. const float * std_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
  1382. for (int i = 0; i < 3; ++i) {
  1383. ctx_clip.image_mean[i] = mean_data[i];
  1384. ctx_clip.image_std[i] = std_data[i];
  1385. }
  1386. }
  1387. // Load the vision feature layer indices if they are explicitly provided;
  1388. // if multiple vision feature layers are present, the values will be concatenated
  1389. // to form the final visual features.
  1390. // NOTE: gguf conversions should standardize the values of the vision feature layer to
  1391. // be non-negative, since we use -1 to mark values as unset here.
  1392. std::vector<int> vision_feature_layer;
  1393. get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
  1394. // convert std::vector to std::unordered_set
  1395. for (auto & layer : vision_feature_layer) {
  1396. hparams.vision_feature_layer.insert(layer);
  1397. }
  1398. // Calculate the deepest feature layer based on hparams and projector type
1399. // NOTE: this is only used by clip_image_build_graph_legacy()
  1400. {
  1401. // Get the index of the second to last layer; this is the default for models that have a llava projector
  1402. int n_layer = hparams.n_layer - 1;
  1403. int deepest_feature_layer = -1;
  1404. if (ctx_clip.proj_type == PROJECTOR_TYPE_MINICPMV
  1405. || ctx_clip.proj_type == PROJECTOR_TYPE_GLM_EDGE
  1406. || ctx_clip.proj_type == PROJECTOR_TYPE_QWEN2VL
  1407. || ctx_clip.proj_type == PROJECTOR_TYPE_QWEN25VL) {
  1408. n_layer += 1;
  1409. }
  1410. // If we set explicit vision feature layers, only go up to the deepest one
  1411. // NOTE: only used by granite-vision models for now
  1412. for (const auto & feature_layer : hparams.vision_feature_layer) {
  1413. if (feature_layer > deepest_feature_layer) {
  1414. deepest_feature_layer = feature_layer;
  1415. }
  1416. }
  1417. ctx_clip.max_feature_layer = deepest_feature_layer < 0 ? n_layer : deepest_feature_layer;
  1418. }
  1419. // model-specific params
  1420. switch (ctx_clip.proj_type) {
  1421. case PROJECTOR_TYPE_MINICPMV:
  1422. {
  1423. if (ctx_clip.minicpmv_version == 0) {
  1424. ctx_clip.minicpmv_version = 2; // default to 2 if not set
  1425. }
  1426. } break;
  1427. case PROJECTOR_TYPE_IDEFICS3:
  1428. {
  1429. get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor, false);
  1430. } break;
  1431. case PROJECTOR_TYPE_PIXTRAL:
  1432. {
  1433. hparams.rope_theta = 10000.0f;
  1434. } break;
  1435. case PROJECTOR_TYPE_QWEN25VL:
  1436. {
  1437. get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern);
  1438. } break;
  1439. default:
  1440. break;
  1441. }
  1442. LOG_INF("%s: projector: %s\n", __func__, proj_type.c_str());
  1443. LOG_INF("%s: has_llava_proj: %d\n", __func__, ctx_clip.has_llava_projector);
  1444. LOG_INF("%s: minicpmv_version: %d\n", __func__, ctx_clip.minicpmv_version);
  1445. LOG_INF("%s: proj_scale_factor: %d\n", __func__, hparams.proj_scale_factor);
  1446. LOG_INF("%s: n_wa_pattern: %d\n", __func__, hparams.n_wa_pattern);
  1447. LOG_INF("%s: model size: %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
  1448. LOG_INF("%s: metadata size: %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
  1449. }
  1450. }
  1451. void load_tensors() {
  1452. std::map<std::string, size_t> tensor_offset;
  1453. std::vector<ggml_tensor *> tensors_to_load;
  1454. // get offsets
  1455. for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
  1456. const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
  1457. tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
  1458. }
  1459. // create data context
  1460. struct ggml_init_params params = {
  1461. /*.mem_size =*/ (gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
  1462. /*.mem_buffer =*/ NULL,
  1463. /*.no_alloc =*/ true,
  1464. };
  1465. ctx_clip.ctx_data.reset(ggml_init(params));
  1466. if (!ctx_clip.ctx_data) {
  1467. throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
  1468. }
  1469. // helper function
  1470. auto get_tensor = [&](const std::string & name, bool required = true) {
  1471. struct ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
  1472. if (!cur && required) {
  1473. throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
  1474. }
  1475. if (cur) {
  1476. tensors_to_load.push_back(cur);
  1477. // add tensors to context
  1478. struct ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
  1479. ggml_set_name(data_tensor, cur->name);
  1480. cur = data_tensor;
  1481. }
  1482. return cur;
  1483. };
  1484. auto & vision_model = ctx_clip.vision_model;
  1485. vision_model.class_embedding = get_tensor(TN_CLASS_EMBD, false);
  1486. vision_model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, "v", "weight"), false);
  1487. vision_model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, "v", "bias"), false);
  1488. vision_model.post_ln_w = get_tensor(string_format(TN_LN_POST, "v", "weight"), false);
  1489. vision_model.post_ln_b = get_tensor(string_format(TN_LN_POST, "v", "bias"), false);
  1490. vision_model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
  1491. vision_model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false);
  1492. vision_model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);
  1493. vision_model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, "v"), false);
  1494. // layers
  1495. vision_model.layers.resize(vision_model.hparams.n_layer);
  1496. for (int il = 0; il < vision_model.hparams.n_layer; ++il) {
  1497. auto & layer = vision_model.layers[il];
  1498. layer.k_w = get_tensor(string_format(TN_ATTN_K, "v", il, "weight"));
  1499. layer.q_w = get_tensor(string_format(TN_ATTN_Q, "v", il, "weight"));
  1500. layer.v_w = get_tensor(string_format(TN_ATTN_V, "v", il, "weight"));
  1501. layer.o_w = get_tensor(string_format(TN_ATTN_OUTPUT, "v", il, "weight"));
  1502. layer.ln_1_w = get_tensor(string_format(TN_LN_1, "v", il, "weight"), false);
  1503. layer.ln_2_w = get_tensor(string_format(TN_LN_2, "v", il, "weight"), false);
  1504. layer.k_b = get_tensor(string_format(TN_ATTN_K, "v", il, "bias"), false);
  1505. layer.q_b = get_tensor(string_format(TN_ATTN_Q, "v", il, "bias"), false);
  1506. layer.v_b = get_tensor(string_format(TN_ATTN_V, "v", il, "bias"), false);
  1507. layer.o_b = get_tensor(string_format(TN_ATTN_OUTPUT, "v", il, "bias"), false);
  1508. layer.ln_1_b = get_tensor(string_format(TN_LN_1, "v", il, "bias"), false);
  1509. layer.ln_2_b = get_tensor(string_format(TN_LN_2, "v", il, "bias"), false);
  1510. // new naming
  1511. layer.ff_up_w = get_tensor(string_format(TN_FFN_UP, "v", il, "weight"));
  1512. layer.ff_up_b = get_tensor(string_format(TN_FFN_UP, "v", il, "bias"), false);
  1513. layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, "v", il, "weight"), false);
  1514. layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, "v", il, "bias"), false);
  1515. layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, "v", il, "weight"));
  1516. layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, "v", il, "bias"), false);
1517. // legacy naming ("in" and "out" are reversed! don't ask me why)
  1518. layer.ff_i_w = layer.ff_down_w;
  1519. layer.ff_o_w = layer.ff_up_w;
  1520. layer.ff_g_w = layer.ff_gate_w;
  1521. layer.ff_i_b = layer.ff_down_b;
  1522. layer.ff_o_b = layer.ff_up_b;
  1523. layer.ff_g_b = layer.ff_gate_b;
  1524. }
  1525. switch (ctx_clip.proj_type) {
  1526. case PROJECTOR_TYPE_MLP:
  1527. case PROJECTOR_TYPE_MLP_NORM:
  1528. {
  1529. // LLaVA projection
  1530. vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
  1531. vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
  1532. // Yi-type llava
  1533. vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
  1534. vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
  1535. // missing in Yi-type llava
  1536. vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
  1537. vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
  1538. // Yi-type llava
  1539. vision_model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
  1540. vision_model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
  1541. vision_model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
  1542. vision_model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
  1543. if (vision_model.mm_3_w) {
  1544. // TODO: this is a hack to support Yi-type llava
  1545. ctx_clip.proj_type = PROJECTOR_TYPE_MLP_NORM;
  1546. }
  1547. vision_model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
  1548. } break;
  1549. case PROJECTOR_TYPE_LDP:
  1550. {
  1551. // MobileVLM projection
  1552. vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
  1553. vision_model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
  1554. vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
  1555. vision_model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
  1556. vision_model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
  1557. vision_model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
  1558. vision_model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
  1559. vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
  1560. vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
  1561. vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
  1562. vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
  1563. vision_model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
  1564. vision_model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
  1565. vision_model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
  1566. vision_model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
  1567. vision_model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
  1568. vision_model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
  1569. vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
  1570. vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
  1571. vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
  1572. vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
  1573. vision_model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
  1574. vision_model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
  1575. vision_model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
  1576. } break;
  1577. case PROJECTOR_TYPE_LDPV2:
  1578. {
1579. // MobileVLM_V2 projection
  1580. vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
  1581. vision_model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
  1582. vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
  1583. vision_model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
  1584. vision_model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
  1585. vision_model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
  1586. } break;
  1587. case PROJECTOR_TYPE_MINICPMV:
  1588. {
  1589. // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
  1590. vision_model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
  1591. vision_model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
  1592. vision_model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
  1593. vision_model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
  1594. vision_model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
  1595. vision_model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
  1596. vision_model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
  1597. vision_model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
  1598. vision_model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
  1599. vision_model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
  1600. vision_model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
  1601. vision_model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
  1602. vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
  1603. vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
  1604. vision_model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
  1605. vision_model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
  1606. vision_model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
  1607. vision_model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
  1608. } break;
  1609. case PROJECTOR_TYPE_GLM_EDGE:
  1610. {
  1611. vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
  1612. vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
  1613. vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR,"weight"));
  1614. vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"weight"));
  1615. vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"bias"));
  1616. vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H,"weight"));
  1617. vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE,"weight"));
  1618. vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H,"weight"));
  1619. } break;
  1620. case PROJECTOR_TYPE_QWEN2VL:
  1621. case PROJECTOR_TYPE_QWEN25VL:
  1622. {
  1623. vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
  1624. vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
  1625. vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
  1626. vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
  1627. } break;
  1628. case PROJECTOR_TYPE_GEMMA3:
  1629. {
  1630. vision_model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
  1631. vision_model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
  1632. } break;
  1633. case PROJECTOR_TYPE_IDEFICS3:
  1634. {
  1635. vision_model.projection = get_tensor(TN_MM_PROJECTOR);
  1636. } break;
  1637. case PROJECTOR_TYPE_PIXTRAL:
  1638. {
  1639. vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
  1640. vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
  1641. vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
  1642. vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
  1643. // [IMG_BREAK] token embedding
  1644. vision_model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
  1645. } break;
  1646. default:
  1647. GGML_ASSERT(false && "unknown projector type");
  1648. }
  1649. // load data
  1650. {
  1651. std::vector<uint8_t> read_buf;
  1652. auto fin = std::ifstream(fname, std::ios::binary);
  1653. if (!fin) {
  1654. throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
  1655. }
  1656. // alloc memory and offload data
  1657. ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
  1658. ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
  1659. ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
  1660. for (auto & t : tensors_to_load) {
  1661. struct ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
  1662. const size_t offset = tensor_offset[t->name];
  1663. fin.seekg(offset, std::ios::beg);
  1664. if (!fin) {
  1665. throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
  1666. }
  1667. size_t num_bytes = ggml_nbytes(cur);
  1668. if (ggml_backend_buft_is_host(buft)) {
  1669. // for the CPU and Metal backend, we can read directly into the tensor
  1670. fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
  1671. } else {
  1672. // read into a temporary buffer first, then copy to device memory
  1673. read_buf.resize(num_bytes);
  1674. fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
  1675. ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
  1676. }
  1677. }
  1678. fin.close();
  1679. LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
  1680. }
  1681. }
  1682. void alloc_compute_meta() {
  1683. ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());
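// build a graph for a dummy image at the model's native input resolution so the scheduler can reserve worst-case compute buffers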
  1684. // create a fake batch
  1685. clip_image_f32_batch batch;
  1686. clip_image_f32_ptr img(clip_image_f32_init());
  1687. clip_image_size image_size;
  1688. image_size.width = ctx_clip.vision_model.hparams.image_size;
  1689. image_size.height = ctx_clip.vision_model.hparams.image_size;
  1690. img->nx = image_size.width;
  1691. img->ny = image_size.height;
  1692. img->buf.resize(image_size.width * image_size.height * 3);
  1693. batch.entries.push_back(std::move(img));
  1694. ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch, image_size, false);
  1695. ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);
  1696. for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
  1697. ggml_backend_t backend = ctx_clip.backend_ptrs[i];
  1698. ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
  1699. size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
  1700. if (size > 1) {
  1701. LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
  1702. ggml_backend_buft_name(buft),
  1703. size / 1024.0 / 1024.0);
  1704. }
  1705. }
  1706. }
  1707. void get_bool(const std::string & key, bool & output, bool required = true) {
  1708. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1709. if (i < 0) {
  1710. if (required) throw std::runtime_error("Key not found: " + key);
  1711. return;
  1712. }
  1713. output = gguf_get_val_bool(ctx_gguf.get(), i);
  1714. }
  1715. void get_i32(const std::string & key, int & output, bool required = true) {
  1716. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1717. if (i < 0) {
  1718. if (required) throw std::runtime_error("Key not found: " + key);
  1719. return;
  1720. }
  1721. output = gguf_get_val_i32(ctx_gguf.get(), i);
  1722. }
  1723. void get_u32(const std::string & key, int & output, bool required = true) {
  1724. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1725. if (i < 0) {
  1726. if (required) throw std::runtime_error("Key not found: " + key);
  1727. return;
  1728. }
  1729. output = gguf_get_val_u32(ctx_gguf.get(), i);
  1730. }
  1731. void get_f32(const std::string & key, float & output, bool required = true) {
  1732. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1733. if (i < 0) {
  1734. if (required) throw std::runtime_error("Key not found: " + key);
  1735. return;
  1736. }
  1737. output = gguf_get_val_f32(ctx_gguf.get(), i);
  1738. }
  1739. void get_string(const std::string & key, std::string & output, bool required = true) {
  1740. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1741. if (i < 0) {
  1742. if (required) throw std::runtime_error("Key not found: " + key);
  1743. return;
  1744. }
  1745. output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
  1746. }
  1747. void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) {
  1748. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1749. if (i < 0) {
  1750. if (required) throw std::runtime_error("Key not found: " + key);
  1751. return;
  1752. }
  1753. int n = gguf_get_arr_n(ctx_gguf.get(), i);
  1754. output.resize(n);
  1755. const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
1756. for (int j = 0; j < n; ++j) {
1757. output[j] = values[j];
  1758. }
  1759. }
  1760. };
  1761. // read and create ggml_context containing the tensors and their data
  1762. struct clip_ctx * clip_model_load(const char * fname, const int verbosity) {
  1763. return clip_init(fname, clip_context_params{
  1764. /* use_gpu */ true,
  1765. /* verbosity */ static_cast<ggml_log_level>(verbosity),
  1766. });
  1767. }
  1768. struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params) {
  1769. g_logger_state.verbosity_thold = ctx_params.verbosity;
  1770. clip_ctx * ctx_clip = new clip_ctx(ctx_params);
  1771. try {
  1772. clip_model_loader loader(fname, *ctx_clip);
  1773. loader.load_hparams();
  1774. loader.load_tensors();
  1775. loader.alloc_compute_meta();
  1776. } catch (const std::exception & e) {
  1777. LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
  1778. delete ctx_clip;
  1779. return nullptr;
  1780. }
  1781. return ctx_clip;
  1782. }
  1783. void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size) {
  1784. ctx_clip->load_image_size = *load_image_size; // copy
  1785. }
  1786. struct clip_image_size * clip_get_load_image_size(struct clip_ctx * ctx_clip) {
  1787. return &ctx_clip->load_image_size;
  1788. }
  1789. struct clip_image_size * clip_image_size_init() {
  1790. struct clip_image_size * load_image_size = new struct clip_image_size();
  1791. load_image_size->width = 448;
  1792. load_image_size->height = 448;
  1793. return load_image_size;
  1794. }
  1795. struct clip_image_u8 * clip_image_u8_init() {
  1796. return new clip_image_u8();
  1797. }
  1798. struct clip_image_f32 * clip_image_f32_init() {
  1799. return new clip_image_f32();
  1800. }
  1801. struct clip_image_f32_batch * clip_image_f32_batch_init() {
  1802. return new clip_image_f32_batch();
  1803. }
  1804. unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
  1805. if (nx) *nx = img->nx;
  1806. if (ny) *ny = img->ny;
  1807. return img->buf.data();
  1808. }
  1809. void clip_image_size_free(struct clip_image_size * load_image_size) {
  1810. if (load_image_size == nullptr) {
  1811. return;
  1812. }
  1813. delete load_image_size;
  1814. }
  1815. void clip_image_u8_free(struct clip_image_u8 * img) { if (img) delete img; }
  1816. void clip_image_f32_free(struct clip_image_f32 * img) { if (img) delete img; }
  1817. void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { if (batch) delete batch; }
  1818. void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { if (batch) delete batch; }
  1819. size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
  1820. return batch->entries.size();
  1821. }
  1822. size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
  1823. if (idx < 0 || idx >= (int)batch->entries.size()) {
  1824. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  1825. return 0;
  1826. }
  1827. return batch->entries[idx]->nx;
  1828. }
  1829. size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
  1830. if (idx < 0 || idx >= (int)batch->entries.size()) {
  1831. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  1832. return 0;
  1833. }
  1834. return batch->entries[idx]->ny;
  1835. }
  1836. clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
  1837. if (idx < 0 || idx >= (int)batch->entries.size()) {
  1838. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  1839. return nullptr;
  1840. }
  1841. return batch->entries[idx].get();
  1842. }
  1843. void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
  1844. img->nx = nx;
  1845. img->ny = ny;
  1846. img->buf.resize(3 * nx * ny);
  1847. memcpy(img->buf.data(), rgb_pixels, img->buf.size());
  1848. }
  1849. bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
  1850. int nx, ny, nc;
  1851. auto * data = stbi_load(fname, &nx, &ny, &nc, 3);
  1852. if (!data) {
  1853. LOG_ERR("%s: failed to load image '%s'\n", __func__, fname);
  1854. return false;
  1855. }
  1856. clip_build_img_from_pixels(data, nx, ny, img);
  1857. stbi_image_free(data);
  1858. return true;
  1859. }
  1860. bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
  1861. int nx, ny, nc;
  1862. auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
  1863. if (!data) {
  1864. LOG_ERR("%s: failed to decode image bytes\n", __func__);
  1865. return false;
  1866. }
  1867. clip_build_img_from_pixels(data, nx, ny, img);
  1868. stbi_image_free(data);
  1869. return true;
  1870. }
1871. // Normalize the image to float32 - careful with PyTorch's .to(model.device, dtype=torch.float16): the fp32 -> fp16 -> fp32 round trip sometimes reduces precision, sometimes not
  1872. static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
  1873. dst.nx = src.nx;
  1874. dst.ny = src.ny;
  1875. dst.buf.resize(src.buf.size());
  1876. // TODO @ngxson : seems like this could be done more efficiently on cgraph
  1877. for (size_t i = 0; i < src.buf.size(); ++i) {
  1878. int c = i % 3; // rgb
  1879. dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
  1880. }
  1881. }
1882. // set of tools to manipulate images
1883. // in the future, we could add HW acceleration by letting this struct use 3rd-party libs such as ImageMagick or OpenCV
  1884. struct image_manipulation {
  1885. // Bilinear resize function
  1886. static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int target_width, int target_height) {
  1887. dst.nx = target_width;
  1888. dst.ny = target_height;
  1889. dst.buf.resize(3 * target_width * target_height);
  1890. float x_ratio = static_cast<float>(src.nx - 1) / target_width;
  1891. float y_ratio = static_cast<float>(src.ny - 1) / target_height;
  1892. for (int y = 0; y < target_height; y++) {
  1893. for (int x = 0; x < target_width; x++) {
  1894. float px = x_ratio * x;
  1895. float py = y_ratio * y;
  1896. int x_floor = static_cast<int>(px);
  1897. int y_floor = static_cast<int>(py);
  1898. float x_lerp = px - x_floor;
  1899. float y_lerp = py - y_floor;
  1900. for (int c = 0; c < 3; c++) {
  1901. float top = lerp(
  1902. static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
  1903. static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
  1904. x_lerp
  1905. );
  1906. float bottom = lerp(
  1907. static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
  1908. static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
  1909. x_lerp
  1910. );
  1911. dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
  1912. }
  1913. }
  1914. }
  1915. }
  1916. // Bicubic resize function
1917. // note: the aspect ratio is not preserved - the image is stretched to the target size
  1918. static bool bicubic_resize(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
  1919. const int nx = img.nx;
  1920. const int ny = img.ny;
  1921. dst.nx = target_width;
  1922. dst.ny = target_height;
  1923. dst.buf.resize(3 * target_width * target_height);
  1924. float Cc;
  1925. float C[5];
  1926. float d0, d2, d3, a0, a1, a2, a3;
  1927. int i, j, k, jj;
  1928. int x, y;
  1929. float dx, dy;
  1930. float tx, ty;
  1931. tx = (float)nx / (float)target_width;
  1932. ty = (float)ny / (float)target_height;
1933. // Bicubic interpolation; adapted from ViT.cpp, inspired by:
  1934. // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
  1935. // -> https://en.wikipedia.org/wiki/Bicubic_interpolation
  1936. for (i = 0; i < target_height; i++) {
  1937. for (j = 0; j < target_width; j++) {
  1938. x = (int)(tx * j);
  1939. y = (int)(ty * i);
  1940. dx = tx * j - x;
  1941. dy = ty * i - y;
  1942. for (k = 0; k < 3; k++) {
  1943. for (jj = 0; jj <= 3; jj++) {
  1944. d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  1945. d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  1946. d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  1947. a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  1948. a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
  1949. a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
  1950. a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
  1951. C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
  1952. d0 = C[0] - C[1];
  1953. d2 = C[2] - C[1];
  1954. d3 = C[3] - C[1];
  1955. a0 = C[1];
  1956. a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
  1957. a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
  1958. a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
  1959. Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;
  1960. const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
1961. dst.buf[(i * target_width + j) * 3 + k] = Cc2;
  1962. }
  1963. }
  1964. }
  1965. }
  1966. return true;
  1967. }
  1968. // llava-1.6 type of resize_and_pad
1969. // if the aspect ratio does not match the target, the image is padded (letterboxed) with pad_color
1970. // pad_color is an RGB triple; the default is {0, 0, 0} (black)
  1971. static void resize_and_pad_image(const clip_image_u8 & image, clip_image_u8 & dst, const clip_image_size & target_resolution, std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
  1972. int target_width = target_resolution.width;
  1973. int target_height = target_resolution.height;
  1974. float scale_w = static_cast<float>(target_width) / image.nx;
  1975. float scale_h = static_cast<float>(target_height) / image.ny;
  1976. int new_width, new_height;
  1977. if (scale_w < scale_h) {
  1978. new_width = target_width;
  1979. new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
  1980. } else {
  1981. new_height = target_height;
  1982. new_width = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
  1983. }
  1984. clip_image_u8 resized_image;
  1985. bicubic_resize(image, resized_image, new_width, new_height);
  1986. clip_image_u8 padded_image;
  1987. padded_image.nx = target_width;
  1988. padded_image.ny = target_height;
  1989. padded_image.buf.resize(3 * target_width * target_height);
  1990. // Fill the padded image with the fill color
  1991. for (size_t i = 0; i < padded_image.buf.size(); i += 3) {
  1992. padded_image.buf[i] = pad_color[0];
  1993. padded_image.buf[i + 1] = pad_color[1];
  1994. padded_image.buf[i + 2] = pad_color[2];
  1995. }
  1996. // Calculate padding offsets
  1997. int pad_x = (target_width - new_width) / 2;
  1998. int pad_y = (target_height - new_height) / 2;
  1999. // Copy the resized image into the center of the padded buffer
  2000. for (int y = 0; y < new_height; ++y) {
  2001. for (int x = 0; x < new_width; ++x) {
  2002. for (int c = 0; c < 3; ++c) {
  2003. padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
  2004. }
  2005. }
  2006. }
  2007. dst = std::move(padded_image);
  2008. }
  2009. static void crop_image(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
  2010. dst.nx = w;
  2011. dst.ny = h;
  2012. dst.buf.resize(3 * w * h);
  2013. for (int i = 0; i < h; ++i) {
  2014. for (int j = 0; j < w; ++j) {
  2015. int src_idx = 3 * ((y + i)*image.nx + (x + j));
  2016. int dst_idx = 3 * (i*w + j);
  2017. dst.buf[dst_idx] = image.buf[src_idx];
  2018. dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
  2019. dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
  2020. }
  2021. }
  2022. }
  2023. // calculate the size of the **resized** image, while preserving the aspect ratio
// the calculated width and height are rounded up to the next multiple of align_size
// if either dimension is larger than max_dimension, the image is scaled down so that it fits within max_dimension (before alignment)
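// illustrative example (hypothetical numbers): with align_size = 16 and max_dimension = 1024,
// a 1000x600 input keeps scale = 1.0 and is padded up to 1008x608 (the next multiples of 16),
// while a 2048x1536 input is first scaled by 0.5 to 1024x768, which is already aligned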
  2026. static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int max_dimension) {
  2027. if (inp_size.width <= 0 || inp_size.height <= 0 || align_size <= 0 || max_dimension <= 0) {
  2028. return {0, 0};
  2029. }
  2030. float scale = std::min(1.0f, std::min(static_cast<float>(max_dimension) / inp_size.width,
  2031. static_cast<float>(max_dimension) / inp_size.height));
  2032. float target_width_f = static_cast<float>(inp_size.width) * scale;
  2033. float target_height_f = static_cast<float>(inp_size.height) * scale;
  2034. int aligned_width = GGML_PAD((int)target_width_f, align_size);
  2035. int aligned_height = GGML_PAD((int)target_height_f, align_size);
  2036. return {aligned_width, aligned_height};
  2037. }
  2038. private:
  2039. static inline int clip(int x, int lower, int upper) {
  2040. return std::max(lower, std::min(x, upper));
  2041. }
  2042. // Linear interpolation between two points
  2043. static inline float lerp(float s, float e, float t) {
  2044. return s + (e - s) * t;
  2045. }
  2046. };
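// Illustrative usage of the helpers above (hypothetical sizes; not a call made by this file as-is):
//
//   clip_image_u8 padded;
//   image_manipulation::resize_and_pad_image(src, padded, clip_image_size{336, 336}, {122, 116, 104});
//
//   clip_image_u8 slice;
//   image_manipulation::crop_image(padded, slice, /*x*/ 0, /*y*/ 0, /*w*/ 168, /*h*/ 168);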
  2047. /**
  2048. * implementation of LLaVA-UHD:
  2049. * - https://arxiv.org/pdf/2403.11703
  2050. * - https://github.com/thunlp/LLaVA-UHD
  2051. * - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
  2052. *
  2053. * overview:
* - an image always has a single overview (downscaled image)
* - an image can have zero or more slices, depending on the image size
* - each slice can then be considered as a separate image
*
* for example:
*
*   [overview] --> [slice 1] --> [slice 2]
*                      |
*                      +------> [slice 3] --> [slice 4]
  2063. */
  2064. struct llava_uhd {
  2065. struct slice_coordinates {
  2066. int x;
  2067. int y;
  2068. clip_image_size size;
  2069. };
  2070. struct slice_instructions {
  2071. clip_image_size overview_size; // size of downscaled image
  2072. clip_image_size refined_size; // size of image right before slicing (must be multiple of slice size)
  2073. clip_image_size grid_size; // grid_size.width * grid_size.height = number of slices
  2074. std::vector<slice_coordinates> slices;
bool padding_refined = false; // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
  2076. };
  2077. static int get_max_slices(struct clip_ctx * ctx) {
  2078. if (clip_is_minicpmv(ctx)) {
  2079. return 9;
  2080. }
  2081. return 0;
  2082. }
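// Rough illustrative example (hypothetical numbers): for a 1000x1500 image with slice_size = 448
// and max_slice_nums = 9, ratio = 1000*1500 / 448^2 ≈ 7.5, so multiple = 8 and slicing is enabled;
// among the candidate grids, 2x4 is closest to the image's log aspect ratio, giving 8 slices in
// addition to the single overview image.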
  2083. static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
  2084. slice_instructions res;
  2085. const int patch_size = clip_get_patch_size(ctx);
  2086. const int slice_size = clip_get_image_size(ctx);
  2087. const int max_slice_nums = get_max_slices(ctx);
  2088. const int original_width = original_size.width;
  2089. const int original_height = original_size.height;
  2090. const float log_ratio = log((float)original_width / original_height);
  2091. const float ratio = (float)original_width * original_height / (slice_size * slice_size);
  2092. const int multiple = fmin(ceil(ratio), max_slice_nums);
  2093. const bool has_slices = (multiple > 1);
  2094. const bool has_pinpoints = !ctx->vision_model.hparams.image_grid_pinpoints.empty();
  2095. if (has_pinpoints) {
  2096. // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
  2097. auto refine_size = llava_uhd::select_best_resolution(
  2098. ctx->vision_model.hparams.image_grid_pinpoints,
  2099. original_size);
  2100. res.overview_size = clip_image_size{slice_size, slice_size};
  2101. res.refined_size = refine_size;
  2102. res.grid_size = clip_image_size{0, 0};
  2103. res.padding_refined = true;
  2104. for (int y = 0; y < refine_size.height; y += slice_size) {
  2105. for (int x = 0; x < refine_size.width; x += slice_size) {
  2106. slice_coordinates slice;
  2107. slice.x = x;
  2108. slice.y = y;
  2109. slice.size.width = std::min(slice_size, refine_size.width - x);
  2110. slice.size.height = std::min(slice_size, refine_size.height - y);
  2111. res.slices.push_back(slice);
  2112. if (x == 0) {
  2113. res.grid_size.width++;
  2114. }
  2115. }
  2116. res.grid_size.height++;
  2117. }
  2118. return res;
  2119. }
  2120. // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)
  2121. auto best_size = get_best_resize(original_size, slice_size, patch_size, has_slices);
  2122. res.overview_size = best_size;
  2123. if (!has_slices) {
  2124. // skip slicing logic
  2125. res.refined_size = clip_image_size{0, 0};
  2126. res.grid_size = clip_image_size{0, 0};
  2127. } else {
  2128. auto best_grid = get_best_grid(max_slice_nums, multiple, log_ratio);
  2129. auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
  2130. res.grid_size = best_grid;
  2131. res.refined_size = refine_size;
  2132. int width = refine_size.width;
  2133. int height = refine_size.height;
  2134. int grid_x = int(width / best_grid.width);
  2135. int grid_y = int(height / best_grid.height);
  2136. for (int patches_y = 0, ic = 0;
  2137. patches_y < refine_size.height && ic < best_grid.height;
  2138. patches_y += grid_y, ic += 1) {
  2139. for (int patches_x = 0, jc = 0;
  2140. patches_x < refine_size.width && jc < best_grid.width;
  2141. patches_x += grid_x, jc += 1) {
  2142. slice_coordinates slice;
  2143. slice.x = patches_x;
  2144. slice.y = patches_y;
  2145. slice.size.width = grid_x;
  2146. slice.size.height = grid_y;
  2147. res.slices.push_back(slice);
  2148. // LOG_INF("slice %d: %d %d %d %d\n", ic, patches_i, patches_j, grid_x, grid_y);
  2149. }
  2150. }
  2151. }
  2152. return res;
  2153. }
  2154. static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
  2155. std::vector<clip_image_u8_ptr> output;
  2156. // resize to overview size
  2157. clip_image_u8_ptr resized_img(clip_image_u8_init());
  2158. image_manipulation::bicubic_resize(*img, *resized_img, inst.overview_size.width, inst.overview_size.height);
  2159. output.push_back(std::move(resized_img));
  2160. if (inst.slices.empty()) {
  2161. // no slices, just return the resized image
  2162. return output;
  2163. }
  2164. // resize to refined size
  2165. clip_image_u8_ptr refined_img(clip_image_u8_init());
  2166. if (inst.padding_refined) {
  2167. image_manipulation::resize_and_pad_image(*img, *refined_img, inst.refined_size);
  2168. } else {
  2169. image_manipulation::bilinear_resize(*img, *refined_img, inst.refined_size.width, inst.refined_size.height);
  2170. }
  2171. // create slices
  2172. for (const auto & slice : inst.slices) {
  2173. int x = slice.x;
  2174. int y = slice.y;
  2175. int w = slice.size.width;
  2176. int h = slice.size.height;
  2177. clip_image_u8_ptr img_slice(clip_image_u8_init());
  2178. image_manipulation::crop_image(*refined_img, *img_slice, x, y, w, h);
  2179. output.push_back(std::move(img_slice));
  2180. }
  2181. return output;
  2182. }
  2183. private:
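// if the image area exceeds scale_resolution^2 (or allow_upscale is set), rescale it so the area
// is roughly scale_resolution^2 while preserving the aspect ratio; both dimensions are then
// rounded to multiples of patch_size via ensure_divide()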
  2184. static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
  2185. int width = original_size.width;
  2186. int height = original_size.height;
  2187. if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
  2188. float r = static_cast<float>(width) / height;
  2189. height = static_cast<int>(scale_resolution / std::sqrt(r));
  2190. width = static_cast<int>(height * r);
  2191. }
  2192. clip_image_size res;
  2193. res.width = ensure_divide(width, patch_size);
  2194. res.height = ensure_divide(height, patch_size);
  2195. return res;
  2196. }
  2197. /**
  2198. * Selects the best resolution from a list of possible resolutions based on the original size.
  2199. *
  2200. * @param original_size The original size of the image
  2201. * @param possible_resolutions A list of possible resolutions
  2202. * @return The best fit resolution
  2203. */
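// illustrative example (hypothetical numbers): for an 800x600 original and candidates
// {672x672, 336x1344}, 672x672 wins: downscaling by 0.84 keeps 672x504 effective pixels,
// more than the 336x252 kept by 336x1344, and it also wastes less of the candidate area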
  2204. static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
  2205. int original_width = original_size.width;
  2206. int original_height = original_size.height;
  2207. clip_image_size best_fit;
  2208. int max_effective_resolution = 0;
  2209. int min_wasted_resolution = std::numeric_limits<int>::max();
  2210. for (const auto & resolution : possible_resolutions) {
  2211. int width = resolution.width;
  2212. int height = resolution.height;
  2213. float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
  2214. int downscaled_width = static_cast<int>(original_width * scale);
  2215. int downscaled_height = static_cast<int>(original_height * scale);
  2216. int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
  2217. int wasted_resolution = (width * height) - effective_resolution;
  2218. // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
  2219. if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
  2220. max_effective_resolution = effective_resolution;
  2221. min_wasted_resolution = wasted_resolution;
  2222. best_fit = resolution;
  2223. }
  2224. }
  2225. return best_fit;
  2226. }
  2227. // used by llava 1.6 with custom list of pinpoints
  2228. static clip_image_size select_best_resolution(const std::vector<int32_t> & pinpoints, const clip_image_size & original_size) {
  2229. std::vector<clip_image_size> possible_resolutions;
  2230. for (size_t i = 0; i < pinpoints.size(); i += 2) {
  2231. possible_resolutions.push_back(clip_image_size{pinpoints[i], pinpoints[i+1]});
  2232. }
  2233. return select_best_resolution(original_size, possible_resolutions);
  2234. }
  2235. static int ensure_divide(int length, int patch_size) {
  2236. return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
  2237. }
  2238. static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
  2239. int width = original_size.width;
  2240. int height = original_size.height;
  2241. int grid_x = grid.width;
  2242. int grid_y = grid.height;
  2243. int refine_width = ensure_divide(width, grid_x);
  2244. int refine_height = ensure_divide(height, grid_y);
  2245. clip_image_size grid_size;
  2246. grid_size.width = refine_width / grid_x;
  2247. grid_size.height = refine_height / grid_y;
  2248. auto best_grid_size = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
  2249. int best_grid_width = best_grid_size.width;
  2250. int best_grid_height = best_grid_size.height;
  2251. clip_image_size refine_size;
  2252. refine_size.width = best_grid_width * grid_x;
  2253. refine_size.height = best_grid_height * grid_y;
  2254. return refine_size;
  2255. }
  2256. static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
  2257. std::vector<int> candidate_split_grids_nums;
  2258. for (int i : {multiple - 1, multiple, multiple + 1}) {
  2259. if (i == 1 || i > max_slice_nums) {
  2260. continue;
  2261. }
  2262. candidate_split_grids_nums.push_back(i);
  2263. }
  2264. std::vector<clip_image_size> candidate_grids;
  2265. for (int split_grids_nums : candidate_split_grids_nums) {
  2266. int m = 1;
  2267. while (m <= split_grids_nums) {
  2268. if (split_grids_nums % m == 0) {
  2269. candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
  2270. }
  2271. ++m;
  2272. }
  2273. }
  2274. clip_image_size best_grid{1, 1};
  2275. float min_error = std::numeric_limits<float>::infinity();
  2276. for (const auto& grid : candidate_grids) {
  2277. float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
  2278. if (error < min_error) {
  2279. best_grid = grid;
  2280. min_error = error;
  2281. }
  2282. }
  2283. return best_grid;
  2284. }
  2285. };
// TODO @ngxson : deprecate the load_image_size singleton pattern
  2287. int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) {
  2288. const auto inst = llava_uhd::get_slice_instructions(ctx_clip, ctx_clip->load_image_size);
  2289. return inst.grid_size.width;
  2290. }
// returns the normalized float tensor for llava-1.5; for spatial_unpad with anyres processing (llava-1.6), it returns the normalized image patch tensors as a vector
// res_imgs memory is allocated here; previous allocations will be freed if found
  2293. bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
  2294. clip_image_size original_size{img->nx, img->ny};
  2295. bool pad_to_square = true;
  2296. auto & params = ctx->vision_model.hparams;
// The model config contains all we need to decide how to preprocess; here we automatically switch to the new llava-1.6 preprocessing
  2298. if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) {
  2299. pad_to_square = false;
  2300. }
  2301. if (clip_is_minicpmv(ctx)) {
  2302. auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
  2303. std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
  2304. for (size_t i = 0; i < imgs.size(); ++i) {
  2305. // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
  2306. clip_image_f32_ptr res(clip_image_f32_init());
  2307. normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
  2308. res_imgs->entries.push_back(std::move(res));
  2309. }
  2310. return true;
  2311. }
  2312. else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
  2313. clip_image_u8 resized;
  2314. auto patch_size = clip_get_patch_size(ctx) * 2;
  2315. int nx = ceil((float)img->nx / patch_size) * patch_size;
  2316. int ny = ceil((float)img->ny / patch_size) * patch_size;
  2317. image_manipulation::bicubic_resize(*img, resized, nx, ny);
  2318. clip_image_f32_ptr img_f32(clip_image_f32_init());
  2319. // clip_image_f32_ptr res(clip_image_f32_init());
  2320. normalize_image_u8_to_f32(resized, *img_f32, ctx->image_mean, ctx->image_std);
  2321. // res_imgs->data[0] = *res;
  2322. res_imgs->entries.push_back(std::move(img_f32));
  2323. return true;
  2324. }
  2325. else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE
  2326. || ctx->proj_type == PROJECTOR_TYPE_GEMMA3
  2327. || ctx->proj_type == PROJECTOR_TYPE_IDEFICS3) {
  2328. clip_image_u8 resized_image;
  2329. int sz = params.image_size;
  2330. image_manipulation::resize_and_pad_image(*img, resized_image, {sz, sz});
  2331. clip_image_f32_ptr img_f32(clip_image_f32_init());
  2332. //clip_image_save_to_bmp(resized_image, "resized.bmp");
  2333. normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
  2334. res_imgs->entries.push_back(std::move(img_f32));
  2335. return true;
  2336. }
  2337. else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) {
  2338. clip_image_u8 resized_image;
  2339. auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, params.patch_size, params.image_size);
  2340. image_manipulation::bilinear_resize(*img, resized_image, new_size.width, new_size.height);
  2341. clip_image_f32_ptr img_f32(clip_image_f32_init());
  2342. normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
  2343. res_imgs->entries.push_back(std::move(img_f32));
  2344. return true;
  2345. }
// the logic below pads the shorter side up to the longer side with a background color: rgb(122, 116, 104)
  2347. // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
  2348. clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily
  2349. if (pad_to_square) {
// for llava-1.5, we resize the image to a square and pad the shorter side with a background color
  2351. // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
  2352. const int longer_side = std::max(img->nx, img->ny);
  2353. temp->nx = longer_side;
  2354. temp->ny = longer_side;
  2355. temp->buf.resize(3 * longer_side * longer_side);
  2356. // background color in RGB from LLaVA (this is the mean rgb color * 255)
  2357. const std::array<uint8_t, 3> pad_color = {122, 116, 104};
  2358. // resize the image to the target_size
  2359. image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);
  2360. clip_image_f32_ptr res(clip_image_f32_init());
  2361. normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
  2362. res_imgs->entries.push_back(std::move(res));
  2363. return true;
  2364. } else if (!params.image_grid_pinpoints.empty()) {
  2365. // "spatial_unpad" with "anyres" processing for llava-1.6
  2366. auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
  2367. std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
  2368. for (size_t i = 0; i < imgs.size(); ++i) {
  2369. // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
  2370. clip_image_f32_ptr res(clip_image_f32_init());
  2371. normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
  2372. res_imgs->entries.push_back(std::move(res));
  2373. }
  2374. return true;
  2375. }
  2376. GGML_ASSERT(false && "Unknown image preprocessing type");
  2377. }
  2378. ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
  2379. return ctx->vision_model.image_newline;
  2380. }
  2381. void clip_free(clip_ctx * ctx) {
  2382. if (ctx == nullptr) {
  2383. return;
  2384. }
  2385. delete ctx;
  2386. }
  2387. size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
  2388. return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float);
  2389. }
  2390. size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_h, int img_w) {
  2391. clip_image_f32 img;
  2392. img.nx = img_w;
  2393. img.ny = img_h;
  2394. return clip_n_patches_by_img(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
  2395. }
  2396. int32_t clip_get_image_size(const struct clip_ctx * ctx) {
  2397. return ctx->vision_model.hparams.image_size;
  2398. }
  2399. int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
  2400. return ctx->vision_model.hparams.patch_size;
  2401. }
  2402. int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
  2403. return ctx->vision_model.hparams.hidden_size;
  2404. }
  2405. const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
  2406. return ctx->vision_model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
  2407. }
  2408. const int32_t * clip_image_grid(const struct clip_ctx * ctx) {
  2409. if (ctx->vision_model.hparams.image_grid_pinpoints.size()) {
  2410. return &ctx->vision_model.hparams.image_grid_pinpoints.front();
  2411. }
  2412. return nullptr;
  2413. }
  2414. size_t get_clip_image_grid_size(const struct clip_ctx * ctx) {
  2415. return ctx->vision_model.hparams.image_grid_pinpoints.size();
  2416. }
  2417. int clip_n_patches(const struct clip_ctx * ctx) {
  2418. clip_image_f32 img;
  2419. img.nx = ctx->vision_model.hparams.image_size;
  2420. img.ny = ctx->vision_model.hparams.image_size;
  2421. return clip_n_patches_by_img(ctx, &img);
  2422. }
  2423. int clip_n_patches_by_img(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
  2424. const auto & params = ctx->vision_model.hparams;
  2425. int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
  2426. if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2 || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
  2427. n_patches /= 4;
  2428. } else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
  2429. if (ctx->minicpmv_version == 2) {
  2430. n_patches = 96;
  2431. }
  2432. else if (ctx->minicpmv_version == 3) {
  2433. n_patches = 64;
  2434. }
  2435. else if (ctx->minicpmv_version == 4) {
  2436. n_patches = 64;
  2437. }
  2438. else {
  2439. GGML_ABORT("Unknown minicpmv version");
  2440. }
  2441. } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
  2442. int patch_size = params.patch_size * 2;
  2443. int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
  2444. int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
  2445. n_patches = x_patch * y_patch;
  2446. } else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
  2447. n_patches = 256;
  2448. } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3) {
  2449. n_patches /= ctx->vision_model.hparams.proj_scale_factor;
  2450. } else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) {
  2451. int n_patches_x = img->nx / params.patch_size;
  2452. int n_patches_y = img->ny / params.patch_size;
  2453. n_patches = n_patches_y*n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
  2454. }
  2455. return n_patches;
  2456. }
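// Sinusoidal position embeddings, used below to build the "pos_embed" input of the MiniCPM-V
// projector. For a position value p and frequency index d in [0, D/2), with D = embed_dim:
//   emb[d]       = sin(p / 10000^(d / (D/2)))
//   emb[d + D/2] = cos(p / 10000^(d / (D/2)))
// The 2D variant concatenates the embeddings of the h-grid and the w-grid.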
  2457. static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
  2458. assert(embed_dim % 2 == 0);
  2459. int H = pos.size();
  2460. int W = pos[0].size();
  2461. std::vector<float> omega(embed_dim / 2);
  2462. for (int i = 0; i < embed_dim / 2; ++i) {
  2463. omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
  2464. }
  2465. std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
  2466. for (int h = 0; h < H; ++h) {
  2467. for (int w = 0; w < W; ++w) {
  2468. for (int d = 0; d < embed_dim / 2; ++d) {
  2469. float out_value = pos[h][w] * omega[d];
  2470. emb[h][w][d] = sin(out_value);
  2471. emb[h][w][d + embed_dim / 2] = cos(out_value);
  2472. }
  2473. }
  2474. }
  2475. return emb;
  2476. }
  2477. static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
  2478. assert(embed_dim % 2 == 0);
  2479. std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
  2480. std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)
  2481. int H = emb_h.size();
  2482. int W = emb_h[0].size();
  2483. std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
  2484. for (int h = 0; h < H; ++h) {
  2485. for (int w = 0; w < W; ++w) {
  2486. for (int d = 0; d < embed_dim / 2; ++d) {
  2487. emb[h][w][d] = emb_h[h][w][d];
  2488. emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
  2489. }
  2490. }
  2491. }
  2492. return emb;
  2493. }
  2494. static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
  2495. int grid_h_size = image_size.first;
  2496. int grid_w_size = image_size.second;
  2497. std::vector<float> grid_h(grid_h_size);
  2498. std::vector<float> grid_w(grid_w_size);
  2499. for (int i = 0; i < grid_h_size; ++i) {
  2500. grid_h[i] = static_cast<float>(i);
  2501. }
  2502. for (int i = 0; i < grid_w_size; ++i) {
  2503. grid_w[i] = static_cast<float>(i);
  2504. }
  2505. std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
  2506. for (int h = 0; h < grid_h_size; ++h) {
  2507. for (int w = 0; w < grid_w_size; ++w) {
  2508. grid[h][w] = grid_w[w];
  2509. }
  2510. }
  2511. std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
  2512. for (int h = 0; h < grid_h_size; ++h) {
  2513. for (int w = 0; w < grid_w_size; ++w) {
  2514. grid_2d[0][h][w] = grid_h[h];
  2515. grid_2d[1][h][w] = grid_w[w];
  2516. }
  2517. }
  2518. std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);
  2519. int H = image_size.first;
  2520. int W = image_size.second;
  2521. std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
  2522. for (int h = 0; h < H; ++h) {
  2523. for (int w = 0; w < W; ++w) {
  2524. pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
  2525. }
  2526. }
  2527. return pos_embed_2d;
  2528. }
  2529. bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
  2530. clip_image_f32_batch imgs;
  2531. clip_image_f32_ptr img_copy(clip_image_f32_init());
  2532. *img_copy = *img;
  2533. imgs.entries.push_back(std::move(img_copy));
  2534. return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
  2535. }
  2536. bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
  2537. const clip_image_f32_batch & imgs = *imgs_c_ptr;
  2538. int batch_size = imgs.entries.size();
  2539. if (ctx->has_llava_projector
  2540. || ctx->proj_type == PROJECTOR_TYPE_MINICPMV
  2541. || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
  2542. GGML_ASSERT(batch_size == 1);
  2543. }
  2544. // build the inference graph
  2545. ggml_backend_sched_reset(ctx->sched.get());
  2546. ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true);
  2547. ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);
  2548. // set inputs
  2549. const auto & model = ctx->vision_model;
  2550. const auto & hparams = model.hparams;
  2551. const int image_size_width = imgs.entries[0]->nx;
  2552. const int image_size_height = imgs.entries[0]->ny;
  2553. const int patch_size = hparams.patch_size;
  2554. const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
  2555. const int num_positions = num_patches + (model.class_embedding ? 1 : 0);
  2556. const int pos_w = ctx->load_image_size.width / patch_size;
  2557. const int pos_h = ctx->load_image_size.height / patch_size;
  2558. const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl
  2559. auto get_inp_tensor = [&gf](const char * name) {
  2560. struct ggml_tensor * inp = ggml_graph_get_tensor(gf, name);
  2561. if (inp == nullptr) {
  2562. GGML_ABORT("Failed to get tensor %s", name);
  2563. }
  2564. if (!(inp->flags & GGML_TENSOR_FLAG_INPUT)) {
  2565. GGML_ABORT("Tensor %s is not an input tensor", name);
  2566. }
  2567. return inp;
  2568. };
  2569. auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
  2570. ggml_tensor * cur = get_inp_tensor(name);
  2571. GGML_ASSERT(cur->type == GGML_TYPE_F32);
  2572. GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
  2573. ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
  2574. };
  2575. auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
  2576. ggml_tensor * cur = get_inp_tensor(name);
  2577. GGML_ASSERT(cur->type == GGML_TYPE_I32);
  2578. GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
  2579. ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
  2580. };
  2581. // set input pixel values
  2582. {
  2583. size_t nelem = 0;
  2584. for (const auto & img : imgs.entries) {
  2585. nelem += img->nx * img->ny * 3;
  2586. }
  2587. std::vector<float> inp_raw(nelem);
  2588. // layout of data (note: the channel dim is unrolled to better visualize the layout):
  2589. //
//  ┌──W──┐
//  │     │ H   channel = R
//  ├─────┤     │
//  │     │ H   channel = G
//  ├─────┤     │
//  │     │ H   channel = B
//  └─────┘     │
//    ──────────┘ x B
  2598. for (size_t i = 0; i < imgs.entries.size(); i++) {
  2599. const int nx = imgs.entries[i]->nx;
  2600. const int ny = imgs.entries[i]->ny;
  2601. const int n = nx * ny;
  2602. for (int b = 0; b < batch_size; b++) {
  2603. float * batch_entry = inp_raw.data() + b * (3*n);
  2604. for (int y = 0; y < ny; y++) {
  2605. for (int x = 0; x < nx; x++) {
  2606. size_t base_src = 3*(y * nx + x); // idx of the first channel
size_t base_dst = y * nx + x; // idx within a single channel plane
  2608. batch_entry[ base_dst] = imgs.entries[b]->buf[base_src ];
  2609. batch_entry[1*n + base_dst] = imgs.entries[b]->buf[base_src + 1];
  2610. batch_entry[2*n + base_dst] = imgs.entries[b]->buf[base_src + 2];
  2611. }
  2612. }
  2613. }
  2614. }
  2615. set_input_f32("inp_raw", inp_raw);
  2616. }
  2617. // set input per projector
  2618. switch (ctx->proj_type) {
  2619. case PROJECTOR_TYPE_MINICPMV:
  2620. {
  2621. // inspired from siglip:
  2622. // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
  2623. // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
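// positions are bucketed into a fixed 70x70 grid (bucket = floor(70*i/pos_dim)), presumably so
// the learned position table can cover arbitrary pos_h/pos_w; each token gets the id
// bucket_coords_h[i]*70 + bucket_coords_w[j] (see the loops below)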
  2624. std::vector<int32_t> positions(pos_h * pos_w);
  2625. int bucket_coords_h[1024];
  2626. int bucket_coords_w[1024];
  2627. for (int i = 0; i < pos_h; i++){
  2628. bucket_coords_h[i] = std::floor(70.0*i/pos_h);
  2629. }
  2630. for (int i = 0; i < pos_w; i++){
  2631. bucket_coords_w[i] = std::floor(70.0*i/pos_w);
  2632. }
  2633. for (int i = 0, id = 0; i < pos_h; i++){
  2634. for (int j = 0; j < pos_w; j++){
  2635. positions[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
  2636. }
  2637. }
  2638. set_input_i32("positions", positions);
  2639. // inspired from resampler of Qwen-VL:
  2640. // -> https://huggingface.co/Qwen/Qwen-VL/tree/main
  2641. // -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
  2642. int embed_dim = clip_n_mmproj_embd(ctx);
  2643. // TODO @ngxson : this is very inefficient, can we do this using ggml_sin and ggml_cos?
  2644. auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));
  2645. std::vector<float> pos_embed(embed_dim * pos_w * pos_h);
  2646. for(int i = 0; i < pos_w * pos_h; ++i){
  2647. for(int j = 0; j < embed_dim; ++j){
  2648. pos_embed[i * embed_dim + j] = pos_embed_t[i][j];
  2649. }
  2650. }
  2651. set_input_f32("pos_embed", pos_embed);
  2652. } break;
  2653. case PROJECTOR_TYPE_QWEN2VL:
  2654. {
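// positions is laid out as 4 consecutive sections of num_patches entries each (presumably the
// four M-RoPE position channels); patches are visited in 2x2 merge order, so each group of 4
// consecutive entries belongs to the same 2x2 block of the patch merger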
  2655. const int merge_ratio = 2;
  2656. const int pw = image_size_width / patch_size;
  2657. const int ph = image_size_height / patch_size;
  2658. std::vector<int> positions(num_positions * 4);
  2659. int ptr = 0;
  2660. for (int y = 0; y < ph; y += merge_ratio) {
  2661. for (int x = 0; x < pw; x += merge_ratio) {
  2662. for (int dy = 0; dy < 2; dy++) {
  2663. for (int dx = 0; dx < 2; dx++) {
  2664. positions[ ptr] = y + dy;
  2665. positions[ num_patches + ptr] = x + dx;
  2666. positions[2 * num_patches + ptr] = y + dy;
  2667. positions[3 * num_patches + ptr] = x + dx;
  2668. ptr++;
  2669. }
  2670. }
  2671. }
  2672. }
  2673. set_input_i32("positions", positions);
  2674. } break;
  2675. case PROJECTOR_TYPE_QWEN25VL:
  2676. {
// pw * ph   = number of tokens output by the ViT after applying the patch merger
// ipw * iph = number of vision tokens processed inside the ViT
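// idx maps a token's raster-order index to its index after grouping tokens by attention window,
// inv_idx is the inverse permutation, and window_mask is 0 inside a window and lowest-float
// everywhere else, so window attention only attends to tokens of the same window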
  2679. const int merge_ratio = 2;
  2680. const int pw = image_size_width / patch_size / merge_ratio;
  2681. const int ph = image_size_height / patch_size / merge_ratio;
  2682. const int ipw = image_size_width / patch_size;
  2683. const int iph = image_size_height / patch_size;
  2684. std::vector<int> idx (ph * pw);
  2685. std::vector<int> inv_idx(ph * pw);
  2686. if (use_window_attn) {
  2687. const int attn_window_size = 112;
  2688. const int grid_window = attn_window_size / patch_size / merge_ratio;
  2689. int dst = 0;
  2690. // [num_vision_tokens, num_vision_tokens] attention mask tensor
  2691. std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
  2692. int mask_row = 0;
  2693. for (int y = 0; y < ph; y += grid_window) {
  2694. for (int x = 0; x < pw; x += grid_window) {
  2695. const int win_h = std::min(grid_window, ph - y);
  2696. const int win_w = std::min(grid_window, pw - x);
  2697. const int dst_0 = dst;
// group all tokens belonging to the same window together (into a contiguous range)
  2699. for (int dy = 0; dy < win_h; dy++) {
  2700. for (int dx = 0; dx < win_w; dx++) {
  2701. const int src = (y + dy) * pw + (x + dx);
  2702. GGML_ASSERT(src < (int)idx.size());
  2703. GGML_ASSERT(dst < (int)inv_idx.size());
  2704. idx [src] = dst;
  2705. inv_idx[dst] = src;
  2706. dst++;
  2707. }
  2708. }
  2709. for (int r=0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
  2710. int row_offset = mask_row * (ipw * iph);
  2711. std::fill(
  2712. mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
  2713. mask.begin() + row_offset + (dst * merge_ratio * merge_ratio),
  2714. 0.0);
  2715. mask_row++;
  2716. }
  2717. }
  2718. }
  2719. set_input_i32("window_idx", idx);
  2720. set_input_i32("inv_window_idx", inv_idx);
  2721. set_input_f32("window_mask", mask);
  2722. } else {
  2723. for (int i = 0; i < ph * pw; i++) {
  2724. idx[i] = i;
  2725. }
  2726. }
  2727. const int mpow = merge_ratio * merge_ratio;
  2728. std::vector<int> positions(num_positions * 4);
  2729. int ptr = 0;
  2730. for (int y = 0; y < iph; y += merge_ratio) {
  2731. for (int x = 0; x < ipw; x += merge_ratio) {
  2732. for (int dy = 0; dy < 2; dy++) {
  2733. for (int dx = 0; dx < 2; dx++) {
  2734. auto remap = idx[ptr / mpow];
  2735. remap = (remap * mpow) + (ptr % mpow);
  2736. positions[ remap] = y + dy;
  2737. positions[ num_patches + remap] = x + dx;
  2738. positions[2 * num_patches + remap] = y + dy;
  2739. positions[3 * num_patches + remap] = x + dx;
  2740. ptr++;
  2741. }
  2742. }
  2743. }
  2744. }
  2745. set_input_i32("positions", positions);
  2746. } break;
  2747. case PROJECTOR_TYPE_PIXTRAL:
  2748. {
  2749. // set the 2D positions
  2750. int n_patches_per_col = image_size_width / patch_size;
  2751. std::vector<int> pos_data(num_positions);
  2752. // dimension H
  2753. for (int i = 0; i < num_positions; i++) {
  2754. pos_data[i] = i / n_patches_per_col;
  2755. }
  2756. set_input_i32("pos_h", pos_data);
  2757. // dimension W
  2758. for (int i = 0; i < num_positions; i++) {
  2759. pos_data[i] = i % n_patches_per_col;
  2760. }
  2761. set_input_i32("pos_w", pos_data);
  2762. } break;
  2763. case PROJECTOR_TYPE_GLM_EDGE:
  2764. {
  2765. // llava and other models
  2766. std::vector<int32_t> positions(num_positions);
  2767. for (int i = 0; i < num_positions; i++) {
  2768. positions[i] = i;
  2769. }
  2770. set_input_i32("positions", positions);
  2771. } break;
  2772. case PROJECTOR_TYPE_MLP:
  2773. case PROJECTOR_TYPE_MLP_NORM:
  2774. case PROJECTOR_TYPE_LDP:
  2775. case PROJECTOR_TYPE_LDPV2:
  2776. {
  2777. // llava and other models
  2778. std::vector<int32_t> positions(num_positions);
  2779. for (int i = 0; i < num_positions; i++) {
  2780. positions[i] = i;
  2781. }
  2782. set_input_i32("positions", positions);
// The patches vector selects which rows of the embeddings to keep;
// we skip row 0 only when a CLS embedding is present, to avoid going
// out of bounds when retrieving the rows.
  2786. int patch_offset = model.class_embedding ? 1 : 0;
  2787. std::vector<int32_t> patches(num_patches);
  2788. for (int i = 0; i < num_patches; i++) {
  2789. patches[i] = i + patch_offset;
  2790. }
  2791. set_input_i32("patches", patches);
  2792. } break;
  2793. case PROJECTOR_TYPE_GEMMA3:
  2794. case PROJECTOR_TYPE_IDEFICS3:
  2795. {
  2796. // do nothing
  2797. } break;
  2798. default:
  2799. GGML_ABORT("Unknown projector type");
  2800. }
  2801. ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
  2802. auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
  2803. if (status != GGML_STATUS_SUCCESS) {
  2804. LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
  2805. return false;
  2806. }
  2807. // the last node is the embedding tensor
  2808. struct ggml_tensor * embeddings = ggml_graph_node(gf, -1);
  2809. // copy the embeddings to the location passed by the user
  2810. ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));
  2811. return true;
  2812. }
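// illustrative call (hypothetical file names):
//   clip_model_quantize("mmproj-model-f16.gguf", "mmproj-model-q4_0.gguf", GGML_TYPE_Q4_0);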
  2813. bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) {
  2814. assert(itype < GGML_TYPE_COUNT);
  2815. ggml_type type = static_cast<ggml_type>(itype);
  2816. auto * ctx_clip = clip_init(fname_inp, clip_context_params{
  2817. /* use_gpu */ false,
  2818. /* verbosity */ GGML_LOG_LEVEL_ERROR,
  2819. });
  2820. const auto & ctx_src = ctx_clip->ctx_gguf.get();
  2821. const auto & ctx_data = ctx_clip->ctx_data.get();
  2822. auto * ctx_out = gguf_init_empty();
  2823. gguf_set_kv(ctx_out, ctx_src);
  2824. gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
  2825. gguf_set_val_u32(ctx_out, "general.file_type", itype);
  2826. auto fout = std::ofstream(fname_out, std::ios::binary);
  2827. const int n_tensors = gguf_get_n_tensors(ctx_src);
  2828. for (int i = 0; i < n_tensors; ++i) {
  2829. const char * name = gguf_get_tensor_name(ctx_src, i);
  2830. struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
  2831. gguf_add_tensor(ctx_out, cur);
  2832. }
  2833. const size_t meta_size = gguf_get_meta_size(ctx_out);
  2834. for (size_t i = 0; i < meta_size; ++i) {
  2835. fout.put(0);
  2836. }
  2837. // regexes of tensor names to be quantized
  2838. const std::vector<std::string> k_names = {
  2839. ".*weight",
  2840. };
  2841. std::vector<uint8_t> work(512);
  2842. std::vector<float> conv_buf(512);
  2843. size_t total_size_org = 0;
  2844. size_t total_size_new = 0;
  2845. for (int i = 0; i < n_tensors; ++i) {
  2846. const std::string name = gguf_get_tensor_name(ctx_src, i);
  2847. struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str());
  2848. enum ggml_type new_type;
  2849. void * new_data;
  2850. size_t new_size;
  2851. bool quantize = false;
  2852. for (const auto & s : k_names) {
  2853. if (std::regex_match(name, std::regex(s))) {
  2854. quantize = true;
  2855. break;
  2856. }
  2857. }
// quantize only 2D tensors whose row length is larger than the quantization block size
  2859. quantize &= (ggml_n_dims(cur) == 2) && cur->ne[0] > ggml_blck_size(type);
  2860. if (quantize) {
  2861. new_type = type;
  2862. if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) {
  2863. new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type
  2864. // LOG_ERR("%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type));
  2865. }
  2866. const size_t n_elms = ggml_nelements(cur);
  2867. float * f32_data;
  2868. switch (cur->type) {
  2869. case GGML_TYPE_F32:
  2870. f32_data = (float *)cur->data;
  2871. break;
  2872. case GGML_TYPE_F16:
  2873. if (conv_buf.size() < n_elms) {
  2874. conv_buf.resize(n_elms);
  2875. }
  2876. for (size_t j = 0; j < n_elms; ++j) {
  2877. conv_buf[j] = ggml_fp16_to_fp32(((ggml_fp16_t *)cur->data)[j]);
  2878. }
  2879. f32_data = (float *)conv_buf.data();
  2880. break;
  2881. default:
  2882. LOG_ERR("%s: Please use an input file in f32 or f16\n", __func__);
  2883. gguf_free(ctx_out);
  2884. return false;
  2885. }
  2886. if (work.size() < n_elms * 4) {
  2887. work.resize(n_elms * 4);
  2888. }
  2889. new_data = work.data();
  2890. new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr);
  2891. } else {
  2892. new_type = cur->type;
  2893. new_data = cur->data;
  2894. new_size = ggml_nbytes(cur);
  2895. }
  2896. const size_t orig_size = ggml_nbytes(cur);
  2897. total_size_org += orig_size;
  2898. total_size_new += new_size;
  2899. gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
  2900. GGML_ASSERT(gguf_get_tensor_size(ctx_out, gguf_find_tensor(ctx_out, name.c_str())) == new_size);
  2901. gguf_set_tensor_data(ctx_out, name.c_str(), new_data);
  2902. fout.write((const char *)new_data, new_size);
  2903. size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
  2904. for (size_t j = 0; j < pad; ++j) {
  2905. fout.put(0);
  2906. }
  2907. LOG_INF("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
  2908. orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
  2909. }
  2910. // go back to beginning of file and write the updated metadata
  2911. fout.seekp(0, std::ios::beg);
  2912. std::vector<uint8_t> meta(meta_size);
  2913. gguf_get_meta_data(ctx_out, meta.data());
  2914. fout.write((const char *)meta.data(), meta_size);
  2915. fout.close();
  2916. clip_free(ctx_clip);
  2917. gguf_free(ctx_out);
  2918. {
  2919. LOG_INF("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
  2920. LOG_INF("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
  2921. }
  2922. return true;
  2923. }
  2924. int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
  2925. switch (ctx->proj_type) {
  2926. case PROJECTOR_TYPE_LDP:
  2927. return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
  2928. case PROJECTOR_TYPE_LDPV2:
  2929. return ctx->vision_model.mm_model_peg_0_b->ne[0];
  2930. case PROJECTOR_TYPE_MLP:
  2931. case PROJECTOR_TYPE_PIXTRAL:
  2932. return ctx->vision_model.mm_2_b->ne[0];
  2933. case PROJECTOR_TYPE_MLP_NORM:
  2934. return ctx->vision_model.mm_3_b->ne[0];
  2935. case PROJECTOR_TYPE_MINICPMV:
  2936. if (ctx->minicpmv_version == 2) {
  2937. return 4096;
  2938. } else if (ctx->minicpmv_version == 3) {
  2939. return 3584;
  2940. } else if (ctx->minicpmv_version == 4) {
  2941. return 3584;
  2942. }
  2943. GGML_ABORT("Unknown minicpmv version");
  2944. case PROJECTOR_TYPE_GLM_EDGE:
  2945. return ctx->vision_model.mm_model_mlp_3_w->ne[1];
  2946. case PROJECTOR_TYPE_QWEN2VL:
  2947. case PROJECTOR_TYPE_QWEN25VL:
  2948. return ctx->vision_model.mm_1_b->ne[0];
  2949. case PROJECTOR_TYPE_GEMMA3:
  2950. return ctx->vision_model.mm_input_proj_w->ne[0];
  2951. case PROJECTOR_TYPE_IDEFICS3:
  2952. return ctx->vision_model.projection->ne[1];
  2953. default:
  2954. GGML_ABORT("Unknown projector type");
  2955. }
  2956. }
  2957. int clip_is_minicpmv(const struct clip_ctx * ctx) {
  2958. if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
  2959. return ctx->minicpmv_version;
  2960. }
  2961. return 0;
  2962. }
  2963. bool clip_is_glm(const struct clip_ctx * ctx) {
  2964. return ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE;
  2965. }
  2966. bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
  2967. return ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL;
  2968. }
  2969. bool clip_is_llava(const struct clip_ctx * ctx) {
  2970. return ctx->has_llava_projector;
  2971. }
  2972. bool clip_is_gemma3(const struct clip_ctx * ctx) {
  2973. return ctx->proj_type == PROJECTOR_TYPE_GEMMA3;
  2974. }
  2975. bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
  2976. clip_image_f32 clip_img;
  2977. clip_img.buf.resize(h * w * 3);
  2978. for (int i = 0; i < h*w*3; i++)
  2979. {
  2980. clip_img.buf[i] = img[i];
  2981. }
  2982. clip_img.nx = w;
  2983. clip_img.ny = h;
  2984. clip_image_encode(ctx, n_threads, &clip_img, vec);
  2985. return true;
  2986. }
  2987. //
  2988. // API used internally with mtmd
  2989. //
  2990. projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
  2991. return ctx->proj_type;
  2992. }