clip.cpp

  1. // NOTE: This is modified from clip.cpp only for LLaVA,
  2. // so there might still be unnecessary artifacts hanging around
  3. // I'll gradually clean and extend it
  4. // Note: even when using identical normalized image inputs (see normalize_image_u8_to_f32()) there is a significant difference in the resulting embeddings compared to PyTorch
  5. #include "clip.h"
  6. #include "clip-impl.h"
  7. #include "ggml.h"
  8. #include "ggml-cpp.h"
  9. #include "ggml-cpu.h"
  10. #include "ggml-alloc.h"
  11. #include "ggml-backend.h"
  12. #include "gguf.h"
  13. #define STB_IMAGE_IMPLEMENTATION
  14. #include "stb_image.h"
  15. #include <cassert>
  16. #include <cmath>
  17. #include <cstdlib>
  18. #include <cstring>
  19. #include <fstream>
  20. #include <map>
  21. #include <regex>
  22. #include <stdexcept>
  23. #include <unordered_set>
  24. #include <vector>
  25. #include <sstream>
  26. #include <cinttypes>
  27. #include <limits>
  28. #include <array>
  29. struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL};
  30. //#define CLIP_DEBUG_FUNCTIONS
  31. #ifdef CLIP_DEBUG_FUNCTIONS
  32. static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
  33. std::ofstream file(filename, std::ios::binary);
  34. if (!file.is_open()) {
  35. LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
  36. return;
  37. }
  38. // PPM header: P6 format, width, height, and max color value
  39. file << "P6\n" << img.nx << " " << img.ny << "\n255\n";
  40. // Write pixel data
  41. for (size_t i = 0; i < img.buf.size(); i += 3) {
  42. // PPM expects binary data in RGB format, which matches our image buffer
  43. file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
  44. }
  45. file.close();
  46. }
  47. static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
  48. std::ofstream file(filename, std::ios::binary);
  49. if (!file.is_open()) {
  50. LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
  51. return;
  52. }
  53. int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
  54. int bytesPerPixel = 3;
  55. int widthInBytes = img.nx * bytesPerPixel;
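  // BMP requires each pixel row to be padded to a multiple of 4 bytes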
  56. int paddingAmount = (4 - (widthInBytes % 4)) % 4;
  57. int stride = widthInBytes + paddingAmount;
  58. // Bitmap file header
  59. unsigned char fileHeader[14] = {
  60. 'B','M', // Signature
  61. 0,0,0,0, // Image file size in bytes
  62. 0,0,0,0, // Reserved
  63. 54,0,0,0 // Start of pixel array
  64. };
  65. // Total file size
  66. fileSize = 54 + (stride * img.ny);
  67. fileHeader[2] = (unsigned char)(fileSize);
  68. fileHeader[3] = (unsigned char)(fileSize >> 8);
  69. fileHeader[4] = (unsigned char)(fileSize >> 16);
  70. fileHeader[5] = (unsigned char)(fileSize >> 24);
  71. // Bitmap information header (BITMAPINFOHEADER)
  72. unsigned char infoHeader[40] = {
  73. 40,0,0,0, // Size of this header (40 bytes)
  74. 0,0,0,0, // Image width
  75. 0,0,0,0, // Image height
  76. 1,0, // Number of color planes
  77. 24,0, // Bits per pixel
  78. 0,0,0,0, // No compression
  79. 0,0,0,0, // Image size (can be 0 for no compression)
  80. 0,0,0,0, // X pixels per meter (not specified)
  81. 0,0,0,0, // Y pixels per meter (not specified)
  82. 0,0,0,0, // Total colors (color table not used)
  83. 0,0,0,0 // Important colors (all are important)
  84. };
  85. // Width and height in the information header
  86. infoHeader[4] = (unsigned char)(img.nx);
  87. infoHeader[5] = (unsigned char)(img.nx >> 8);
  88. infoHeader[6] = (unsigned char)(img.nx >> 16);
  89. infoHeader[7] = (unsigned char)(img.nx >> 24);
  90. infoHeader[8] = (unsigned char)(img.ny);
  91. infoHeader[9] = (unsigned char)(img.ny >> 8);
  92. infoHeader[10] = (unsigned char)(img.ny >> 16);
  93. infoHeader[11] = (unsigned char)(img.ny >> 24);
  94. // Write file headers
  95. file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
  96. file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));
  97. // Pixel data
  98. std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
  99. for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
  100. for (int x = 0; x < img.nx; ++x) {
  101. // Each pixel
  102. size_t pixelIndex = (y * img.nx + x) * 3;
  103. unsigned char pixel[3] = {
  104. img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
  105. img.buf[pixelIndex + 1],
  106. img.buf[pixelIndex]
  107. };
  108. file.write(reinterpret_cast<char*>(pixel), 3);
  109. }
  110. // Write padding for the row
  111. file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
  112. }
  113. file.close();
  114. }
  115. // debug function to convert f32 to u8
  116. static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
  117. dst.nx = src.nx;
  118. dst.ny = src.ny;
  119. dst.buf.resize(3 * src.nx * src.ny);
  120. for (size_t i = 0; i < src.buf.size(); ++i) {
  121. dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
  122. }
  123. }
  124. #endif
  125. //
  126. // clip layers
  127. //
  128. enum patch_merge_type {
  129. PATCH_MERGE_FLAT,
  130. PATCH_MERGE_SPATIAL_UNPAD,
  131. };
  132. struct clip_hparams {
  133. int32_t image_size;
  134. int32_t patch_size;
  135. int32_t hidden_size;
  136. int32_t n_intermediate;
  137. int32_t projection_dim;
  138. int32_t n_head;
  139. int32_t n_layer;
  140. patch_merge_type mm_patch_merge_type = PATCH_MERGE_FLAT;
  141. float eps;
  142. std::vector<int32_t> image_grid_pinpoints;
  143. int32_t image_crop_resolution;
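  // encoder layer indices whose outputs are concatenated to form the vision features (empty = use the final output)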
  144. std::unordered_set<int32_t> vision_feature_layer;
  145. };
  146. struct clip_layer {
  147. // attention
  148. struct ggml_tensor * k_w = nullptr;
  149. struct ggml_tensor * k_b = nullptr;
  150. struct ggml_tensor * q_w = nullptr;
  151. struct ggml_tensor * q_b = nullptr;
  152. struct ggml_tensor * v_w = nullptr;
  153. struct ggml_tensor * v_b = nullptr;
  154. struct ggml_tensor * o_w = nullptr;
  155. struct ggml_tensor * o_b = nullptr;
  156. // layernorm 1
  157. struct ggml_tensor * ln_1_w = nullptr;
  158. struct ggml_tensor * ln_1_b = nullptr;
  159. // ff
  160. struct ggml_tensor * ff_i_w = nullptr;
  161. struct ggml_tensor * ff_i_b = nullptr;
  162. struct ggml_tensor * ff_o_w = nullptr;
  163. struct ggml_tensor * ff_o_b = nullptr;
  164. // layernorm 2
  165. struct ggml_tensor * ln_2_w = nullptr;
  166. struct ggml_tensor * ln_2_b = nullptr;
  167. };
  168. struct clip_vision_model {
  169. struct clip_hparams hparams;
  170. // embeddings
  171. struct ggml_tensor * class_embedding = nullptr;
  172. struct ggml_tensor * patch_embeddings_0 = nullptr;
  173. struct ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along the temporal dimension (Qwen2VL)
  174. struct ggml_tensor * patch_bias = nullptr;
  175. struct ggml_tensor * position_embeddings = nullptr;
  176. struct ggml_tensor * pre_ln_w = nullptr;
  177. struct ggml_tensor * pre_ln_b = nullptr;
  178. std::vector<clip_layer> layers;
  179. struct ggml_tensor * post_ln_w = nullptr;
  180. struct ggml_tensor * post_ln_b = nullptr;
  181. struct ggml_tensor * projection = nullptr;
  182. // LLaVA projection
  183. struct ggml_tensor * mm_0_w = nullptr;
  184. struct ggml_tensor * mm_0_b = nullptr;
  185. struct ggml_tensor * mm_2_w = nullptr;
  186. struct ggml_tensor * mm_2_b = nullptr;
  187. struct ggml_tensor * image_newline = nullptr;
  188. // Yi type models with mlp+normalization projection
  189. struct ggml_tensor * mm_1_w = nullptr; // Yi-type models use projector layers 0, 1, 3, 4
  190. struct ggml_tensor * mm_1_b = nullptr;
  191. struct ggml_tensor * mm_3_w = nullptr;
  192. struct ggml_tensor * mm_3_b = nullptr;
  193. struct ggml_tensor * mm_4_w = nullptr;
  194. struct ggml_tensor * mm_4_b = nullptr;
  195. //GLMV-Edge projection
  196. struct ggml_tensor * mm_model_adapter_conv_w = nullptr;
  197. struct ggml_tensor * mm_model_adapter_conv_b = nullptr;
  198. struct ggml_tensor * boi_w = nullptr;
  199. struct ggml_tensor * eoi_w = nullptr;
  200. // MobileVLM projection
  201. struct ggml_tensor * mm_model_mlp_1_w = nullptr;
  202. struct ggml_tensor * mm_model_mlp_1_b = nullptr;
  203. struct ggml_tensor * mm_model_mlp_3_w = nullptr;
  204. struct ggml_tensor * mm_model_mlp_3_b = nullptr;
  205. struct ggml_tensor * mm_model_block_1_block_0_0_w = nullptr;
  206. struct ggml_tensor * mm_model_block_1_block_0_1_w = nullptr;
  207. struct ggml_tensor * mm_model_block_1_block_0_1_b = nullptr;
  208. struct ggml_tensor * mm_model_block_1_block_1_fc1_w = nullptr;
  209. struct ggml_tensor * mm_model_block_1_block_1_fc1_b = nullptr;
  210. struct ggml_tensor * mm_model_block_1_block_1_fc2_w = nullptr;
  211. struct ggml_tensor * mm_model_block_1_block_1_fc2_b = nullptr;
  212. struct ggml_tensor * mm_model_block_1_block_2_0_w = nullptr;
  213. struct ggml_tensor * mm_model_block_1_block_2_1_w = nullptr;
  214. struct ggml_tensor * mm_model_block_1_block_2_1_b = nullptr;
  215. struct ggml_tensor * mm_model_block_2_block_0_0_w = nullptr;
  216. struct ggml_tensor * mm_model_block_2_block_0_1_w = nullptr;
  217. struct ggml_tensor * mm_model_block_2_block_0_1_b = nullptr;
  218. struct ggml_tensor * mm_model_block_2_block_1_fc1_w = nullptr;
  219. struct ggml_tensor * mm_model_block_2_block_1_fc1_b = nullptr;
  220. struct ggml_tensor * mm_model_block_2_block_1_fc2_w = nullptr;
  221. struct ggml_tensor * mm_model_block_2_block_1_fc2_b = nullptr;
  222. struct ggml_tensor * mm_model_block_2_block_2_0_w = nullptr;
  223. struct ggml_tensor * mm_model_block_2_block_2_1_w = nullptr;
  224. struct ggml_tensor * mm_model_block_2_block_2_1_b = nullptr;
  225. // MobileVLM_V2 projection
  226. struct ggml_tensor * mm_model_mlp_0_w = nullptr;
  227. struct ggml_tensor * mm_model_mlp_0_b = nullptr;
  228. struct ggml_tensor * mm_model_mlp_2_w = nullptr;
  229. struct ggml_tensor * mm_model_mlp_2_b = nullptr;
  230. struct ggml_tensor * mm_model_peg_0_w = nullptr;
  231. struct ggml_tensor * mm_model_peg_0_b = nullptr;
  232. // MINICPMV projection
  233. struct ggml_tensor * mm_model_pos_embed_k = nullptr;
  234. struct ggml_tensor * mm_model_query = nullptr;
  235. struct ggml_tensor * mm_model_proj = nullptr;
  236. struct ggml_tensor * mm_model_kv_proj = nullptr;
  237. struct ggml_tensor * mm_model_attn_q_w = nullptr;
  238. struct ggml_tensor * mm_model_attn_q_b = nullptr;
  239. struct ggml_tensor * mm_model_attn_k_w = nullptr;
  240. struct ggml_tensor * mm_model_attn_k_b = nullptr;
  241. struct ggml_tensor * mm_model_attn_v_w = nullptr;
  242. struct ggml_tensor * mm_model_attn_v_b = nullptr;
  243. struct ggml_tensor * mm_model_attn_o_w = nullptr;
  244. struct ggml_tensor * mm_model_attn_o_b = nullptr;
  245. struct ggml_tensor * mm_model_ln_q_w = nullptr;
  246. struct ggml_tensor * mm_model_ln_q_b = nullptr;
  247. struct ggml_tensor * mm_model_ln_kv_w = nullptr;
  248. struct ggml_tensor * mm_model_ln_kv_b = nullptr;
  249. struct ggml_tensor * mm_model_ln_post_w = nullptr;
  250. struct ggml_tensor * mm_model_ln_post_b = nullptr;
  251. // gemma3
  252. struct ggml_tensor * mm_input_proj_w = nullptr;
  253. struct ggml_tensor * mm_soft_emb_norm_w = nullptr;
  254. };
  255. struct clip_ctx {
  256. bool has_text_encoder = false;
  257. bool has_vision_encoder = false;
  258. bool has_llava_projector = false;
  259. bool has_minicpmv_projector = false;
  260. bool has_glm_projector = false;
  261. bool has_qwen2vl_merger = false;
  262. int minicpmv_version = 2;
  263. struct clip_vision_model vision_model;
  264. projector_type proj_type = PROJECTOR_TYPE_MLP;
  265. int32_t max_feature_layer; // unused in newer models like gemma3
  266. float image_mean[3];
  267. float image_std[3];
  268. bool use_gelu = false;
  269. bool use_silu = false;
  270. gguf_context_ptr ctx_gguf;
  271. ggml_context_ptr ctx_data;
  272. std::vector<uint8_t> buf_compute_meta;
  273. std::vector<ggml_backend_t> backend_ptrs;
  274. std::vector<ggml_backend_buffer_type_t> backend_buft;
  275. ggml_backend_t backend;
  276. ggml_backend_t backend_cpu;
  277. ggml_backend_buffer_ptr buf;
  278. ggml_backend_sched_ptr sched;
  279. clip_image_size load_image_size;
  280. clip_ctx(clip_context_params & ctx_params) {
  281. backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
  282. backend = ctx_params.use_gpu
  283. ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr)
  284. : nullptr;
  285. if (backend) {
  286. LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
  287. backend_ptrs.push_back(backend);
  288. backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
  289. } else {
  290. backend = backend_cpu;
  291. LOG_INF("%s: CLIP using CPU backend\n", __func__);
  292. }
  293. backend_ptrs.push_back(backend_cpu);
  294. backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));
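  // the scheduler assigns graph nodes to the available backends (graph capacity of 8192 nodes, no parallel compute)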
  295. sched.reset(
  296. ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false)
  297. );
  298. }
  299. ~clip_ctx() {
  300. ggml_backend_free(backend);
  301. if (backend != backend_cpu) {
  302. ggml_backend_free(backend_cpu);
  303. }
  304. }
  305. };
  306. static ggml_cgraph * clip_image_build_graph_siglip(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
  307. const auto & model = ctx->vision_model;
  308. const auto & hparams = model.hparams;
  309. const int image_size = hparams.image_size;
  310. int image_size_width = image_size;
  311. int image_size_height = image_size;
  312. const int patch_size = hparams.patch_size;
  313. const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
  314. const int hidden_size = hparams.hidden_size;
  315. const int n_head = hparams.n_head;
  316. const int d_head = hidden_size / n_head;
  317. const int n_layer = hparams.n_layer;
  318. const float eps = hparams.eps;
  319. GGML_ASSERT(imgs.entries.size() == 1); // batch_size == 1
  320. struct ggml_init_params params = {
  321. /*.mem_size =*/ ctx->buf_compute_meta.size(),
  322. /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
  323. /*.no_alloc =*/ true,
  324. };
  325. ggml_context_ptr ctx0_ptr(ggml_init(params));
  326. auto ctx0 = ctx0_ptr.get();
  327. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  328. // input raw
  329. struct ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3);
  330. ggml_set_name(inp_raw, "inp_raw");
  331. ggml_set_input(inp_raw);
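  // patch embedding: a conv2d with stride == patch_size splits the image into non-overlapping patches and projects each one to hidden_size channels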
  332. struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  333. inp = ggml_reshape_2d(ctx0, inp, num_patches, hidden_size);
  334. inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
  335. inp = ggml_add(ctx0, inp, model.patch_bias);
  336. // position embeddings
  337. struct ggml_tensor * embeddings = ggml_add(ctx0, inp, model.position_embeddings);
  338. // loop over layers
  339. for (int il = 0; il < n_layer; il++) {
  340. struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states
  341. // layernorm1
  342. {
  343. cur = ggml_norm(ctx0, cur, eps);
  344. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w), model.layers[il].ln_1_b);
  345. }
  346. // self-attention
  347. {
  348. struct ggml_tensor * Q =
  349. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b);
  350. Q = ggml_reshape_3d(ctx0, Q, d_head, n_head, num_patches);
  351. Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
  352. struct ggml_tensor * K =
  353. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b);
  354. K = ggml_reshape_3d(ctx0, K, d_head, n_head, num_patches);
  355. K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
  356. struct ggml_tensor * V =
  357. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b);
  358. V = ggml_reshape_3d(ctx0, V, d_head, n_head, num_patches);
  359. V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
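  // attention scores; soft_max_ext applies the 1/sqrt(d_head) scaling before the softmax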
  360. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  361. KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
  362. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
  363. KQV = ggml_reshape_3d(ctx0, KQV, d_head, num_patches, n_head);
  364. KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  365. cur = ggml_cont_2d(ctx0, KQV, hidden_size, num_patches);
  366. }
  367. // attention output
  368. cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);
  369. // re-add the layer input, i.e. the residual connection
  370. cur = ggml_add(ctx0, cur, embeddings);
  371. embeddings = cur; // embeddings = residual, cur = hidden_states
  372. // layernorm2
  373. {
  374. cur = ggml_norm(ctx0, cur, eps);
  375. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);
  376. }
  377. cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
  378. cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
  379. // siglip uses gelu
  380. cur = ggml_gelu(ctx0, cur);
  381. cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
  382. cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);
  383. // residual 2
  384. cur = ggml_add(ctx0, embeddings, cur);
  385. embeddings = cur;
  386. }
  387. // post-layernorm
  388. if (model.post_ln_w) {
  389. embeddings = ggml_norm(ctx0, embeddings, eps);
  390. ggml_set_name(embeddings, "post_ln");
  391. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
  392. }
  393. if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
  394. const int batch_size = 1;
  395. const int mm_tokens_per_image = 256; // default value for gemma3
  396. const int tokens_per_side = sqrt(mm_tokens_per_image);
  397. const int patches_per_image = sqrt(num_patches);
  398. const int kernel_size = patches_per_image / tokens_per_side;
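  // e.g. with gemma3 defaults (assumed: 896x896 input, patch size 14): 64x64 patches, pooled 4x4 -> 16x16 = 256 tokens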
  399. embeddings = ggml_cont(ctx0, ggml_transpose(ctx0, embeddings));
  400. embeddings = ggml_reshape_4d(ctx0, embeddings, patches_per_image, patches_per_image, hidden_size, batch_size);
  401. // doing a pool2d to reduce the number of output tokens to 256
  402. embeddings = ggml_pool_2d(ctx0, embeddings, GGML_OP_POOL_AVG, kernel_size, kernel_size, kernel_size, kernel_size, 0, 0);
  403. embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0] * embeddings->ne[0], hidden_size, batch_size);
  404. embeddings = ggml_cont(ctx0, ggml_transpose(ctx0, embeddings));
  405. // apply norm before projection
  406. embeddings = ggml_rms_norm(ctx0, embeddings, eps);
  407. embeddings = ggml_mul(ctx0, embeddings, model.mm_soft_emb_norm_w);
  408. // apply projection
  409. embeddings = ggml_mul_mat(ctx0,
  410. ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)),
  411. embeddings);
  412. }
  413. // build the graph
  414. ggml_build_forward_expand(gf, embeddings);
  415. return gf;
  416. }
  417. static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_image_f32_batch & imgs, struct clip_image_size load_image_size, bool is_inf = false) {
  418. if (!ctx->has_vision_encoder) {
  419. LOG_ERR("This gguf file seems to have no vision encoder\n");
  420. return nullptr;
  421. }
  422. const auto & model = ctx->vision_model;
  423. const auto & hparams = model.hparams;
  424. const int image_size = hparams.image_size;
  425. int image_size_width = image_size;
  426. int image_size_height = image_size;
  427. if (ctx->has_minicpmv_projector) {
  428. LOG_DBG("%s: %d %d\n", __func__, load_image_size.width, load_image_size.height);
  429. image_size_width = load_image_size.width;
  430. image_size_height = load_image_size.height;
  431. if (is_inf) {
  432. image_size_width = imgs.entries[0]->nx;
  433. image_size_height = imgs.entries[0]->ny;
  434. }
  435. }
  436. else if (ctx->has_qwen2vl_merger) {
  437. // use the image's native resolution when the image is available
  438. if (is_inf) {
  439. // if (imgs->data->nx && imgs->data->ny) {
  440. image_size_width = imgs.entries[0]->nx;
  441. image_size_height = imgs.entries[0]->ny;
  442. }
  443. }
  444. const int patch_size = hparams.patch_size;
  445. const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
  446. const int patches_w = image_size_width / patch_size;
  447. const int patches_h = image_size_height / patch_size;
  448. const int num_positions = num_patches + (model.class_embedding ? 1 : 0);
  449. const int num_position_ids = ctx->has_qwen2vl_merger ? num_positions * 4 : num_positions;
  450. const int hidden_size = hparams.hidden_size;
  451. const int n_head = hparams.n_head;
  452. const int d_head = hidden_size / n_head;
  453. const float eps = hparams.eps;
  454. int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
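  // M-RoPE (qwen2vl): the rotary dimensions are split evenly across the 4 sections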
  455. const int batch_size = imgs.entries.size();
  456. if (ctx->has_llava_projector || ctx->has_minicpmv_projector || ctx->has_glm_projector) {
  457. GGML_ASSERT(batch_size == 1);
  458. }
  459. struct ggml_init_params params = {
  460. /*.mem_size =*/ ctx->buf_compute_meta.size(),
  461. /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
  462. /*.no_alloc =*/ true,
  463. };
  464. ggml_context_ptr ctx0_ptr(ggml_init(params));
  465. auto ctx0 = ctx0_ptr.get();
  466. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  467. struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3, batch_size);
  468. ggml_set_name(inp_raw, "inp_raw");
  469. ggml_set_input(inp_raw);
  470. struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  471. if (ctx->has_qwen2vl_merger) {
  472. GGML_ASSERT(image_size_width % (patch_size * 2) == 0);
  473. GGML_ASSERT(image_size_height % (patch_size * 2) == 0);
  474. auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
  475. inp = ggml_add(ctx0, inp, inp_1);
  476. inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 2, 0, 3)); // [w, h, c, b] -> [c, w, h, b]
  477. inp = ggml_reshape_4d(
  478. ctx0, inp,
  479. hidden_size * 2, patches_w / 2, patches_h, batch_size);
  480. inp = ggml_reshape_4d(
  481. ctx0, inp,
  482. hidden_size * 2, patches_w / 2, 2, batch_size * (patches_h / 2));
  483. inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 0, 2, 1, 3));
  484. inp = ggml_reshape_3d(
  485. ctx0, inp,
  486. hidden_size, patches_w * patches_h, batch_size);
  487. }
  488. else {
  489. inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
  490. inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));
  491. }
  492. if (model.patch_bias) {
  493. // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
  494. inp = ggml_add(ctx0, inp, model.patch_bias);
  495. }
  496. struct ggml_tensor * embeddings = inp;
  497. struct ggml_tensor * pos_embed = nullptr;
  498. if (ctx->has_llava_projector) {
  499. // concat class_embeddings and patch_embeddings
  500. if (model.class_embedding) {
  501. embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
  502. ggml_set_name(embeddings, "embeddings");
  503. ggml_set_input(embeddings);
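  // place the class embedding in the first row and the patch embeddings right after it (offset = one row)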
  504. embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
  505. embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
  506. embeddings = ggml_acc(ctx0, embeddings, inp,
  507. embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
  508. }
  509. }
  510. struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
  511. ggml_set_name(positions, "positions");
  512. ggml_set_input(positions);
  513. if (!ctx->has_qwen2vl_merger) { // qwen2vl uses RoPE position embeddings instead
  514. embeddings =
  515. ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));
  516. }
  517. if (ctx->has_minicpmv_projector) {
  518. int pos_w = image_size_width/patch_size;
  519. int pos_h = image_size_height/patch_size;
  520. if (ctx->minicpmv_version == 2) {
  521. pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 4096, pos_w * pos_h, 1);
  522. }
  523. else if (ctx->minicpmv_version == 3) {
  524. pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 3584, pos_w * pos_h, 1);
  525. }
  526. else if (ctx->minicpmv_version == 4) {
  527. pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 3584, pos_w * pos_h, 1);
  528. }
  529. ggml_set_name(pos_embed, "pos_embed");
  530. ggml_set_input(pos_embed);
  531. }
  532. // pre-layernorm
  533. if (model.pre_ln_w) {
  534. embeddings = ggml_norm(ctx0, embeddings, eps);
  535. ggml_set_name(embeddings, "pre_ln");
  536. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
  537. }
  538. std::vector<struct ggml_tensor *> embedding_stack;
  539. const auto & vision_feature_layer = hparams.vision_feature_layer;
  540. // loop over layers
  541. for (int il = 0; il < ctx->max_feature_layer; il++) {
  542. struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states
  543. // If this is an embedding feature layer, save the output.
  544. // NOTE: index 0 here refers to the input to the encoder.
  545. if (vision_feature_layer.find(il) != vision_feature_layer.end()) {
  546. embedding_stack.push_back(embeddings);
  547. }
  548. //const size_t nb_q_w = model.layers[il].q_w->nb[0];
  549. // layernorm1
  550. {
  551. cur = ggml_norm(ctx0, cur, eps);
  552. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w),
  553. model.layers[il].ln_1_b);
  554. }
  555. // self-attention
  556. {
  557. struct ggml_tensor * Q =
  558. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b);
  559. Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size);
  560. if (ctx->has_qwen2vl_merger) {
  561. Q = ggml_rope_multi(
  562. ctx0, Q, positions, nullptr,
  563. d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
  564. }
  565. Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
  566. Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size);
  567. struct ggml_tensor * K =
  568. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b);
  569. K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
  570. if (ctx->has_qwen2vl_merger) {
  571. K = ggml_rope_multi(
  572. ctx0, K, positions, nullptr,
  573. d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
  574. }
  575. K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
  576. K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
  577. struct ggml_tensor * V =
  578. ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b);
  579. V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
  580. V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
  581. V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);
  582. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  583. KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
  584. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
  585. KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size);
  586. KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  587. cur = ggml_cont_3d(ctx0, KQV, hidden_size, num_positions, batch_size);
  588. }
  589. // attention output
  590. cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);
  591. // re-add the layer input, i.e. the residual connection
  592. cur = ggml_add(ctx0, cur, embeddings);
  593. embeddings = cur; // embeddings = residual, cur = hidden_states
  594. // layernorm2
  595. {
  596. cur = ggml_norm(ctx0, cur, eps);
  597. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);
  598. }
  599. cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
  600. cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
  601. if (ctx->use_gelu) {
  602. cur = ggml_gelu_inplace(ctx0, cur);
  603. } else if (ctx->use_silu) {
  604. cur = ggml_silu_inplace(ctx0, cur);
  605. } else {
  606. cur = ggml_gelu_quick_inplace(ctx0, cur);
  607. }
  608. cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
  609. cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);
  610. // residual 2
  611. cur = ggml_add(ctx0, embeddings, cur);
  612. embeddings = cur;
  613. }
  614. // post-layernorm
  615. if (model.post_ln_w) {
  616. embeddings = ggml_norm(ctx0, embeddings, eps);
  617. ggml_set_name(embeddings, "post_ln");
  618. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
  619. }
  620. // check whether the final layer is also a requested vision feature layer
  621. if (vision_feature_layer.find(ctx->max_feature_layer) != vision_feature_layer.end()) {
  622. embedding_stack.push_back(embeddings);
  623. }
  624. // If feature layers are explicitly set, stack them (if we have multiple)
  625. if (!embedding_stack.empty()) {
  626. embeddings = embedding_stack[0];
  627. for (size_t i = 1; i < embedding_stack.size(); i++) {
  628. embeddings = ggml_concat(ctx0, embeddings, embedding_stack[i], 0);
  629. }
  630. }
  631. // llava projector
  632. if (ctx->has_llava_projector) {
  633. embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
  634. struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
  635. ggml_set_name(patches, "patches");
  636. ggml_set_input(patches);
  637. // shape [1, 576, 1024]
  638. // ne is whcn, ne = [1024, 576, 1, 1]
  639. embeddings = ggml_get_rows(ctx0, embeddings, patches);
  640. // print_tensor_info(embeddings, "embeddings");
  641. // llava projector
  642. if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
  643. embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
  644. embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
  645. embeddings = ggml_gelu(ctx0, embeddings);
  646. if (model.mm_2_w) {
  647. embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
  648. embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
  649. }
  650. }
  651. else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
  652. embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
  653. embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
  654. // ggml_tensor_printf(embeddings, "mm_0_w",0,true,false);
  655. // First LayerNorm
  656. embeddings = ggml_norm(ctx0, embeddings, eps);
  657. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),
  658. model.mm_1_b);
  659. // GELU activation
  660. embeddings = ggml_gelu(ctx0, embeddings);
  661. // Second linear layer
  662. embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
  663. embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);
  664. // Second LayerNorm
  665. embeddings = ggml_norm(ctx0, embeddings, eps);
  666. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
  667. model.mm_4_b);
  668. }
  669. else if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
  670. // MobileVLM projector
  671. int n_patch = 24;
  672. struct ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
  673. mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
  674. mlp_1 = ggml_gelu(ctx0, mlp_1);
  675. struct ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
  676. mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
  677. // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]
  678. // block 1
  679. struct ggml_tensor * block_1 = nullptr;
  680. {
  681. // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
  682. mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
  683. mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
  684. // stride = 1, padding = 1, bias is nullptr
  685. block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);
  686. // layer norm
  687. // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  688. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
  689. // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
  690. block_1 = ggml_norm(ctx0, block_1, eps);
  691. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
  692. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
  693. // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  694. // hardswish
  695. struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
  696. block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
  697. // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
  698. // pointwise conv
  699. block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
  700. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
  701. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
  702. block_1 = ggml_relu(ctx0, block_1);
  703. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
  704. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
  705. block_1 = ggml_hardsigmoid(ctx0, block_1);
  706. // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
  707. block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
  708. block_1 = ggml_mul(ctx0, block_1_hw, block_1);
  709. int w = block_1->ne[0], h = block_1->ne[1];
  710. block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
  711. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
  712. // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
  713. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
  714. block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
  715. // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
  716. block_1 = ggml_norm(ctx0, block_1, eps);
  717. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
  718. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
  719. // block1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
  720. // residual
  721. block_1 = ggml_add(ctx0, mlp_3, block_1);
  722. }
  723. // block_2
  724. {
  725. // stride = 2
  726. block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
  727. // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
  728. // layer norm
  729. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
  730. // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
  731. block_1 = ggml_norm(ctx0, block_1, eps);
  732. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
  733. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
  734. // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
  735. // hardswish
  736. struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
  737. // not sure the parameters are right for globalAvgPooling
  738. block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
  739. // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
  740. // pointwise conv
  741. block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
  742. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
  743. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
  744. block_1 = ggml_relu(ctx0, block_1);
  745. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
  746. block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
  747. block_1 = ggml_hardsigmoid(ctx0, block_1);
  748. // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
  749. block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
  750. block_1 = ggml_mul(ctx0, block_1_hw, block_1);
  751. int w = block_1->ne[0], h = block_1->ne[1];
  752. block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
  753. block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
  754. // block_1 shape = [1, 12*12, 2048], ne = [12*12, 2048, 1]
  755. block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
  756. block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
  757. // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
  758. block_1 = ggml_norm(ctx0, block_1, eps);
  759. block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
  760. block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
  761. // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
  762. }
  763. embeddings = block_1;
  764. }
  765. else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2)
  766. {
  767. int n_patch = 24;
  768. struct ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
  769. mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
  770. mlp_0 = ggml_gelu(ctx0, mlp_0);
  771. struct ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
  772. mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
  773. // mlp_2 ne = [2048, 576, 1, 1]
  774. // AVG Pool Layer 2*2, strides = 2
  775. mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
  776. // mlp_2 ne = [576, 2048, 1, 1]
  777. mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
  778. // mlp_2 ne [24, 24, 2048, 1]
  779. mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
  780. // weight ne = [3, 3, 2048, 1]
  781. struct ggml_tensor * peg_0 = ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
  782. peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
  783. peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
  784. mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
  785. peg_0 = ggml_add(ctx0, peg_0, mlp_2);
  786. peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
  787. embeddings = peg_0;
  788. }
  789. else {
  790. GGML_ABORT("fatal error");
  791. }
  792. }
  793. // minicpmv projector
  794. else if (ctx->has_minicpmv_projector)
  795. {
  796. if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
  797. struct ggml_tensor * q = model.mm_model_query;
  798. { // layernorm
  799. q = ggml_norm(ctx0, q, eps);
  800. q = ggml_add(ctx0, ggml_mul(ctx0, q, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
  801. }
  802. struct ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);
  803. { // layernorm
  804. v = ggml_norm(ctx0, v, eps);
  805. v = ggml_add(ctx0, ggml_mul(ctx0, v, model.mm_model_ln_kv_w), model.mm_model_ln_kv_b);
  806. }
  807. struct ggml_tensor * k;
  808. { // position
  809. // q = ggml_add(ctx0, q, model.mm_model_pos_embed);
  810. k = ggml_add(ctx0, v, pos_embed);
  811. }
  812. { // attention
  813. int hidden_size = 4096;
  814. const int d_head = 128;
  815. int n_head = hidden_size/d_head;
  816. int num_query = 96;
  817. if (ctx->minicpmv_version == 2) {
  818. hidden_size = 4096;
  819. n_head = hidden_size/d_head;
  820. num_query = 96;
  821. }
  822. else if (ctx->minicpmv_version == 3) {
  823. hidden_size = 3584;
  824. n_head = hidden_size/d_head;
  825. num_query = 64;
  826. }
  827. else if (ctx->minicpmv_version == 4) {
  828. hidden_size = 3584;
  829. n_head = hidden_size/d_head;
  830. num_query = 64;
  831. }
  832. struct ggml_tensor * Q = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q), model.mm_model_attn_q_b);
  833. struct ggml_tensor * K = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k), model.mm_model_attn_k_b);
  834. struct ggml_tensor * V = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v), model.mm_model_attn_v_b);
  835. // permute
  836. Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_query, batch_size);
  837. Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
  838. Q = ggml_reshape_3d(ctx0, Q, d_head, num_query, n_head * batch_size);
  839. K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
  840. K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
  841. K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
  842. V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
  843. V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
  844. V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);
  845. struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
  846. KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
  847. struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
  848. KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_query, n_head, batch_size);
  849. KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
  850. KQV = ggml_cont_3d(ctx0, KQV, hidden_size, num_query, batch_size);
  851. embeddings = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_o_w, KQV), model.mm_model_attn_o_b);
  852. }
  853. { // layernorm
  854. embeddings = ggml_norm(ctx0, embeddings, eps);
  855. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_post_w), model.mm_model_ln_post_b);
  856. }
  857. embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);
  858. }
  859. else {
  860. GGML_ASSERT(false);
  861. }
  862. }
  863. // glm projector
  864. else if (ctx->has_glm_projector) {
  865. if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
  866. size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
  867. embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
  868. embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
  869. embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
  870. embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0]*embeddings->ne[1], embeddings->ne[2], batch_size);
  871. embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
  872. embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);
  873. // GLU
  874. {
  875. embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
  876. embeddings = ggml_norm(ctx0, embeddings, eps);
  877. embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
  878. embeddings = ggml_gelu_inplace(ctx0, embeddings);
  879. struct ggml_tensor * x = embeddings;
  880. embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
  881. x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, x);
  882. embeddings = ggml_silu_inplace(ctx0, embeddings);
  883. embeddings = ggml_mul(ctx0, embeddings, x);
  884. embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
  885. }
  886. } else {
  887. GGML_ABORT("fatal error");
  888. }
  889. }
  890. else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) {
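  // Qwen2-VL merger: groups of 4 patch embeddings are concatenated (hidden_size * 4) and passed through a 2-layer MLP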
  891. embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size * 4, num_positions / 4, batch_size);
  892. embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
  893. embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
  894. // GELU activation
  895. embeddings = ggml_gelu(ctx0, embeddings);
  896. // Second linear layer
  897. embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings);
  898. embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);
  899. }
  900. // build the graph
  901. ggml_build_forward_expand(gf, embeddings);
  902. return gf;
  903. }
  904. static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs, struct clip_image_size load_image_size, bool is_inf = false) {
  905. if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
  906. return clip_image_build_graph_siglip(ctx, imgs);
  907. } else {
  908. // TODO: we should have one build_* function per model
  909. return clip_image_build_graph_legacy(ctx, imgs, load_image_size, is_inf);
  910. }
  911. }
  912. struct clip_model_loader {
  913. ggml_context_ptr ctx_meta;
  914. gguf_context_ptr ctx_gguf;
  915. clip_ctx & ctx_clip;
  916. std::string fname;
  917. size_t model_size = 0; // in bytes
  918. // TODO @ngxson : we should not pass clip_ctx here, it should be clip_vision_model
  919. clip_model_loader(const char * fname, clip_ctx & ctx_clip) : ctx_clip(ctx_clip), fname(fname) {
  920. struct ggml_context * meta = nullptr;
  921. struct gguf_init_params params = {
  922. /*.no_alloc = */ true,
  923. /*.ctx = */ &meta,
  924. };
  925. ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
  926. if (!ctx_gguf.get()) {
  927. throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
  928. }
  929. ctx_meta.reset(meta);
  930. const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
  931. // print gguf info
  932. {
  933. std::string name;
  934. get_string(KEY_NAME, name, false);
  935. std::string description;
  936. get_string(KEY_DESCRIPTION, description, false);
  937. LOG_INF("%s: model name: %s\n", __func__, name.c_str());
  938. LOG_INF("%s: description: %s\n", __func__, description.c_str());
  939. LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx_gguf.get()));
  940. LOG_INF("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
  941. LOG_INF("%s: n_tensors: %d\n", __func__, n_tensors);
  942. LOG_INF("%s: n_kv: %d\n", __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
  943. LOG_INF("\n");
  944. }
  945. // tensors
  946. {
  947. for (int i = 0; i < n_tensors; ++i) {
  948. const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
  949. const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
  950. enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
  951. struct ggml_tensor * cur = ggml_get_tensor(meta, name);
  952. size_t tensor_size = ggml_nbytes(cur);
  953. model_size += tensor_size;
  954. LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
  955. __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
  956. }
  957. }
  958. }
  959. void load_hparams() {
  960. // projector type
  961. {
  962. std::string proj_type;
  963. get_string(KEY_PROJ_TYPE, proj_type, false);
  964. if (!proj_type.empty()) {
  965. ctx_clip.proj_type = clip_projector_type_from_string(proj_type);
  966. }
  967. if (ctx_clip.proj_type == PROJECTOR_TYPE_UNKNOWN) {
  968. throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
  969. }
  970. }
  971. // other hparams
  972. {
  973. get_bool(KEY_HAS_TEXT_ENC, ctx_clip.has_text_encoder, false);
  974. get_bool(KEY_HAS_VIS_ENC, ctx_clip.has_vision_encoder, false);
  975. GGML_ASSERT(ctx_clip.has_vision_encoder);
  976. GGML_ASSERT(!ctx_clip.has_text_encoder);
  977. // legacy keys, use KEY_PROJ_TYPE instead
  978. get_bool(KEY_HAS_LLAVA_PROJ, ctx_clip.has_llava_projector, false);
  979. get_bool(KEY_HAS_MINICPMV_PROJ, ctx_clip.has_minicpmv_projector, false);
  980. get_i32(KEY_MINICPMV_VERSION, ctx_clip.minicpmv_version, false);
  981. get_bool(KEY_HAS_GLM_PROJ, ctx_clip.has_glm_projector, false);
  982. get_bool(KEY_HAS_QWEN2VL_MERGER, ctx_clip.has_qwen2vl_merger, false);
  983. // !!! do NOT extend the list above, use KEY_PROJ_TYPE instead
  984. get_bool(KEY_USE_GELU, ctx_clip.use_gelu, false);
  985. get_bool(KEY_USE_SILU, ctx_clip.use_silu, false);
  986. auto & hparams = ctx_clip.vision_model.hparams;
  987. get_u32(string_format(KEY_N_EMBD, "vision"), hparams.hidden_size);
  988. get_u32(string_format(KEY_N_HEAD, "vision"), hparams.n_head);
  989. get_u32(string_format(KEY_N_FF, "vision"), hparams.n_intermediate);
  990. get_u32(string_format(KEY_N_BLOCK, "vision"), hparams.n_layer);
  991. get_u32(string_format(KEY_PROJ_DIM, "vision"), hparams.projection_dim);
  992. get_f32(string_format(KEY_LAYER_NORM_EPS, "vision"), hparams.eps);
  993. get_u32(KEY_IMAGE_SIZE, hparams.image_size);
  994. get_u32(KEY_PATCH_SIZE, hparams.patch_size);
  995. get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
  996. get_arr_int(KEY_IMAGE_GRID_PINPOINTS, hparams.image_grid_pinpoints, false);
  997. {
  998. std::string mm_patch_merge_type;
  999. get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
  1000. if (mm_patch_merge_type == "spatial_unpad") {
  1001. hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
  1002. }
  1003. }
  1004. {
  1005. int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
  1006. int idx_std = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
  1007. GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
  1008. GGML_ASSERT(idx_std >= 0 && "image_std not found");
  1009. const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
  1010. const float * std_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
  1011. for (int i = 0; i < 3; ++i) {
  1012. ctx_clip.image_mean[i] = mean_data[i];
  1013. ctx_clip.image_std[i] = std_data[i];
  1014. }
  1015. }
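// for reference only: CLIP-ViT style models typically ship image_mean ~ {0.481, 0.458, 0.408}
// and image_std ~ {0.269, 0.261, 0.276}; the values actually used always come from the GGUF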
  1016. // Load the vision feature layer indices if they are explicitly provided;
  1017. // if multiple vision feature layers are present, the values will be concatenated
  1018. // to form the final visual features.
  1019. // NOTE: gguf conversions should standardize the values of the vision feature layer to
  1020. // be non-negative, since we use -1 to mark values as unset here.
  1021. std::vector<int> vision_feature_layer;
  1022. get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
  1023. // convert std::vector to std::unordered_set
  1024. for (auto & layer : vision_feature_layer) {
  1025. hparams.vision_feature_layer.insert(layer);
  1026. }
  1027. // Calculate the deepest feature layer based on hparams and projector type
  1028. ctx_clip.max_feature_layer = get_deepest_feature_layer(&ctx_clip);
  1029. LOG_INF("%s: text_encoder: %d\n", __func__, ctx_clip.has_text_encoder);
  1030. LOG_INF("%s: vision_encoder: %d\n", __func__, ctx_clip.has_vision_encoder);
  1031. LOG_INF("%s: llava_projector: %d\n", __func__, ctx_clip.has_llava_projector);
  1032. LOG_INF("%s: minicpmv_projector: %d\n", __func__, ctx_clip.has_minicpmv_projector);
  1033. LOG_INF("%s: minicpmv_version: %d\n", __func__, ctx_clip.minicpmv_version);
  1034. LOG_INF("%s: glm_projector: %d\n", __func__, ctx_clip.has_glm_projector);
  1035. LOG_INF("%s: model size: %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
  1036. LOG_INF("%s: metadata size: %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
  1037. }
  1038. }
  1039. void load_tensors() {
  1040. std::map<std::string, size_t> tensor_offset;
  1041. std::vector<ggml_tensor *> tensors_to_load;
  1042. // get offsets
  1043. for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
  1044. const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
  1045. tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
  1046. }
  1047. // create data context
  1048. struct ggml_init_params params = {
  1049. /*.mem_size =*/ (gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
  1050. /*.mem_buffer =*/ NULL,
  1051. /*.no_alloc =*/ true,
  1052. };
  1053. ctx_clip.ctx_data.reset(ggml_init(params));
  1054. if (!ctx_clip.ctx_data) {
  1055. throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
  1056. }
  1057. // helper function
  1058. auto get_tensor = [&](const std::string & name, bool required = true) {
  1059. struct ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
  1060. if (!cur && required) {
  1061. throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
  1062. }
  1063. if (cur) {
  1064. tensors_to_load.push_back(cur);
  1065. // add tensors to context
  1066. struct ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
  1067. ggml_set_name(data_tensor, cur->name);
  1068. cur = data_tensor;
  1069. }
  1070. return cur;
  1071. };
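// get_tensor() returns a data-less duplicate living in ctx_clip.ctx_data (created with
// no_alloc = true); the original metadata tensor is queued in tensors_to_load so that
// its bytes can be streamed in once the backend buffer has been allocated (see below)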
  1072. auto & vision_model = ctx_clip.vision_model;
  1073. vision_model.class_embedding = get_tensor(TN_CLASS_EMBD, false);
  1074. vision_model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, "v", "weight"), false);
  1075. vision_model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, "v", "bias"), false);
  1076. vision_model.post_ln_w = get_tensor(string_format(TN_LN_POST, "v", "weight"), false);
  1077. vision_model.post_ln_b = get_tensor(string_format(TN_LN_POST, "v", "bias"), false);
  1078. vision_model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
  1079. vision_model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false);
  1080. vision_model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);
  1081. if (vision_model.patch_embeddings_1 == nullptr) {
  1082. ctx_clip.has_qwen2vl_merger = false;
  1083. }
  1084. vision_model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, "v"), false);
  1085. // layers
  1086. vision_model.layers.resize(vision_model.hparams.n_layer);
  1087. for (int il = 0; il < vision_model.hparams.n_layer; ++il) {
  1088. auto & layer = vision_model.layers[il];
  1089. layer.k_w = get_tensor(string_format(TN_ATTN_K, "v", il, "weight"));
  1090. layer.q_w = get_tensor(string_format(TN_ATTN_Q, "v", il, "weight"));
  1091. layer.v_w = get_tensor(string_format(TN_ATTN_V, "v", il, "weight"));
  1092. layer.o_w = get_tensor(string_format(TN_ATTN_OUTPUT, "v", il, "weight"));
  1093. layer.ln_1_w = get_tensor(string_format(TN_LN_1, "v", il, "weight"), false);
  1094. layer.ln_2_w = get_tensor(string_format(TN_LN_2, "v", il, "weight"), false);
  1095. layer.ff_i_w = get_tensor(string_format(TN_FFN_DOWN, "v", il, "weight"));
  1096. layer.ff_o_w = get_tensor(string_format(TN_FFN_UP, "v", il, "weight"));
  1097. layer.k_b = get_tensor(string_format(TN_ATTN_K, "v", il, "bias"), false);
  1098. layer.q_b = get_tensor(string_format(TN_ATTN_Q, "v", il, "bias"), false);
  1099. layer.v_b = get_tensor(string_format(TN_ATTN_V, "v", il, "bias"), false);
  1100. layer.o_b = get_tensor(string_format(TN_ATTN_OUTPUT, "v", il, "bias"), false);
  1101. layer.ln_1_b = get_tensor(string_format(TN_LN_1, "v", il, "bias"), false);
  1102. layer.ln_2_b = get_tensor(string_format(TN_LN_2, "v", il, "bias"), false);
  1103. layer.ff_i_b = get_tensor(string_format(TN_FFN_DOWN, "v", il, "bias"), false);
  1104. layer.ff_o_b = get_tensor(string_format(TN_FFN_UP, "v", il, "bias"), false);
  1105. }
  1106. switch (ctx_clip.proj_type) {
  1107. case PROJECTOR_TYPE_MLP:
  1108. case PROJECTOR_TYPE_MLP_NORM:
  1109. {
  1110. // LLaVA projection
  1111. vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
  1112. vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
  1113. // Yi-type llava
  1114. vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
  1115. vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
  1116. // missing in Yi-type llava
  1117. vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
  1118. vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
  1119. // Yi-type llava
  1120. vision_model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
  1121. vision_model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
  1122. vision_model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
  1123. vision_model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
  1124. if (vision_model.mm_3_w) {
  1125. // TODO: this is a hack to support Yi-type llava
  1126. ctx_clip.proj_type = PROJECTOR_TYPE_MLP_NORM;
  1127. }
  1128. vision_model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
  1129. } break;
  1130. case PROJECTOR_TYPE_LDP:
  1131. {
  1132. // MobileVLM projection
  1133. vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
  1134. vision_model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
  1135. vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
  1136. vision_model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
  1137. vision_model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
  1138. vision_model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
  1139. vision_model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
  1140. vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
  1141. vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
  1142. vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
  1143. vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
  1144. vision_model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
  1145. vision_model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
  1146. vision_model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
  1147. vision_model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
  1148. vision_model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
  1149. vision_model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
  1150. vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
  1151. vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
  1152. vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
  1153. vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
  1154. vision_model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
  1155. vision_model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
  1156. vision_model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
  1157. } break;
  1158. case PROJECTOR_TYPE_LDPV2:
  1159. {
// MobileVLM_V2 projection
  1161. vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
  1162. vision_model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
  1163. vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
  1164. vision_model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
  1165. vision_model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
  1166. vision_model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
  1167. } break;
  1168. case PROJECTOR_TYPE_RESAMPLER:
  1169. {
  1170. // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
  1171. vision_model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
  1172. vision_model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
  1173. vision_model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
  1174. vision_model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
  1175. vision_model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
  1176. vision_model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
  1177. vision_model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
  1178. vision_model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
  1179. vision_model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
  1180. vision_model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
  1181. vision_model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
  1182. vision_model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
  1183. vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
  1184. vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
  1185. vision_model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
  1186. vision_model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
  1187. vision_model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
  1188. vision_model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
  1189. } break;
  1190. case PROJECTOR_TYPE_GLM_EDGE:
  1191. {
  1192. vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
  1193. vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
  1194. vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR,"weight"));
  1195. vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"weight"));
  1196. vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"bias"));
  1197. vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H,"weight"));
  1198. vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE,"weight"));
  1199. vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H,"weight"));
  1200. vision_model.boi_w = get_tensor(TN_GLM_BOI_W);
  1201. vision_model.eoi_w = get_tensor(TN_GLM_EOI_W);
  1202. } break;
  1203. case PROJECTOR_TYPE_MERGER:
  1204. {
  1205. vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
  1206. vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
  1207. vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
  1208. vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
  1209. } break;
  1210. case PROJECTOR_TYPE_GEMMA3:
  1211. {
  1212. vision_model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
  1213. vision_model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
  1214. } break;
  1215. default:
  1216. GGML_ASSERT(false && "unknown projector type");
  1217. }
  1218. // load data
  1219. {
  1220. std::vector<uint8_t> read_buf;
  1221. auto fin = std::ifstream(fname, std::ios::binary);
  1222. if (!fin) {
  1223. throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
  1224. }
  1225. // alloc memory and offload data
  1226. ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
  1227. ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
  1228. ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
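// all duplicated tensors now share a single backend buffer; marking it as
// GGML_BACKEND_BUFFER_USAGE_WEIGHTS tells the scheduler this buffer holds static model weights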
  1229. for (auto & t : tensors_to_load) {
  1230. struct ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
  1231. const size_t offset = tensor_offset[t->name];
  1232. fin.seekg(offset, std::ios::beg);
  1233. if (!fin) {
  1234. throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
  1235. }
  1236. size_t num_bytes = ggml_nbytes(cur);
  1237. if (ggml_backend_buft_is_host(buft)) {
  1238. // for the CPU and Metal backend, we can read directly into the tensor
  1239. fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
  1240. } else {
  1241. // read into a temporary buffer first, then copy to device memory
  1242. read_buf.resize(num_bytes);
  1243. fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
  1244. ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
  1245. }
  1246. }
  1247. fin.close();
  1248. LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
  1249. }
  1250. }
  1251. void alloc_compute_meta() {
  1252. ctx_clip.buf_compute_meta.resize(GGML_DEFAULT_GRAPH_SIZE * ggml_tensor_overhead() + ggml_graph_overhead());
  1253. // create a fake batch
  1254. clip_image_f32_batch batch;
  1255. clip_image_f32_ptr img(clip_image_f32_init());
  1256. clip_image_size image_size;
  1257. image_size.width = clip_get_image_size(&ctx_clip);
  1258. image_size.height = clip_get_image_size(&ctx_clip);
  1259. int n_patches = clip_get_image_size(&ctx_clip) / image_size.width;
  1260. img->nx = n_patches;
  1261. img->ny = n_patches;
  1262. img->buf.resize(n_patches * image_size.width * image_size.height * 3);
  1263. batch.entries.push_back(std::move(img));
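// the dummy batch above is only used to build a representative graph so that
// ggml_backend_sched_reserve() can size the compute buffers up front; the graph is
// rebuilt and re-allocated for every real encode call (see clip_image_batch_encode)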
  1264. ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch, image_size, false);
  1265. ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);
  1266. for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
  1267. ggml_backend_t backend = ctx_clip.backend_ptrs[i];
  1268. ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
  1269. size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
  1270. if (size > 1) {
  1271. LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
  1272. ggml_backend_buft_name(buft),
  1273. size / 1024.0 / 1024.0);
  1274. }
  1275. }
  1276. }
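// small GGUF key getters: with required == false a missing key leaves `output` untouched,
// with required == true a missing key throws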
  1277. void get_bool(const std::string & key, bool & output, bool required = true) {
  1278. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1279. if (i < 0) {
  1280. if (required) throw std::runtime_error("Key not found: " + key);
  1281. return;
  1282. }
  1283. output = gguf_get_val_bool(ctx_gguf.get(), i);
  1284. }
  1285. void get_i32(const std::string & key, int & output, bool required = true) {
  1286. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1287. if (i < 0) {
  1288. if (required) throw std::runtime_error("Key not found: " + key);
  1289. return;
  1290. }
  1291. output = gguf_get_val_i32(ctx_gguf.get(), i);
  1292. }
  1293. void get_u32(const std::string & key, int & output, bool required = true) {
  1294. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1295. if (i < 0) {
  1296. if (required) throw std::runtime_error("Key not found: " + key);
  1297. return;
  1298. }
  1299. output = gguf_get_val_u32(ctx_gguf.get(), i);
  1300. }
  1301. void get_f32(const std::string & key, float & output, bool required = true) {
  1302. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1303. if (i < 0) {
  1304. if (required) throw std::runtime_error("Key not found: " + key);
  1305. return;
  1306. }
  1307. output = gguf_get_val_f32(ctx_gguf.get(), i);
  1308. }
  1309. void get_string(const std::string & key, std::string & output, bool required = true) {
  1310. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1311. if (i < 0) {
  1312. if (required) throw std::runtime_error("Key not found: " + key);
  1313. return;
  1314. }
  1315. output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
  1316. }
  1317. void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) {
  1318. const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
  1319. if (i < 0) {
  1320. if (required) throw std::runtime_error("Key not found: " + key);
  1321. return;
  1322. }
  1323. int n = gguf_get_arr_n(ctx_gguf.get(), i);
  1324. output.resize(n);
  1325. const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
  1326. for (int i = 0; i < n; ++i) {
  1327. output[i] = values[i];
  1328. }
  1329. }
  1330. };
  1331. // read and create ggml_context containing the tensors and their data
  1332. struct clip_ctx * clip_model_load(const char * fname, const int verbosity) {
  1333. return clip_init(fname, clip_context_params{
  1334. /* use_gpu */ true,
  1335. /* verbosity */ static_cast<ggml_log_level>(verbosity),
  1336. });
  1337. }
  1338. struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params) {
  1339. g_logger_state.verbosity_thold = ctx_params.verbosity;
  1340. clip_ctx * ctx_clip = new clip_ctx(ctx_params);
  1341. try {
  1342. clip_model_loader loader(fname, *ctx_clip);
  1343. loader.load_hparams();
  1344. loader.load_tensors();
  1345. loader.alloc_compute_meta();
  1346. } catch (const std::exception & e) {
  1347. LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
  1348. delete ctx_clip;
  1349. return nullptr;
  1350. }
  1351. return ctx_clip;
  1352. }
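// minimal usage sketch (illustrative only; the file name and flag values are placeholders):
//   clip_context_params params;
//   params.use_gpu   = true;
//   params.verbosity = GGML_LOG_LEVEL_INFO;
//   clip_ctx * ctx = clip_init("mmproj.gguf", params); // returns nullptr on failure
//   ...
//   clip_free(ctx);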
  1353. void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size) {
  1354. ctx_clip->load_image_size = *load_image_size; // copy
  1355. }
  1356. struct clip_image_size * clip_get_load_image_size(struct clip_ctx * ctx_clip) {
  1357. return &ctx_clip->load_image_size;
  1358. }
  1359. struct clip_image_size * clip_image_size_init() {
  1360. struct clip_image_size * load_image_size = new struct clip_image_size();
  1361. load_image_size->width = 448;
  1362. load_image_size->height = 448;
  1363. return load_image_size;
  1364. }
  1365. struct clip_image_u8 * clip_image_u8_init() {
  1366. return new clip_image_u8();
  1367. }
  1368. struct clip_image_f32 * clip_image_f32_init() {
  1369. return new clip_image_f32();
  1370. }
  1371. struct clip_image_f32_batch * clip_image_f32_batch_init() {
  1372. return new clip_image_f32_batch();
  1373. }
  1374. unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
  1375. if (nx) *nx = img->nx;
  1376. if (ny) *ny = img->ny;
  1377. return img->buf.data();
  1378. }
  1379. void clip_image_size_free(struct clip_image_size * load_image_size) {
  1380. if (load_image_size == nullptr) {
  1381. return;
  1382. }
  1383. delete load_image_size;
  1384. }
  1385. void clip_image_u8_free(struct clip_image_u8 * img) { if (img) delete img; }
  1386. void clip_image_f32_free(struct clip_image_f32 * img) { if (img) delete img; }
  1387. void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { if (batch) delete batch; }
  1388. void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { if (batch) delete batch; }
  1389. size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
  1390. return batch->entries.size();
  1391. }
  1392. size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
  1393. if (idx < 0 || idx >= (int)batch->entries.size()) {
  1394. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  1395. return 0;
  1396. }
  1397. return batch->entries[idx]->nx;
  1398. }
  1399. size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
  1400. if (idx < 0 || idx >= (int)batch->entries.size()) {
  1401. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  1402. return 0;
  1403. }
  1404. return batch->entries[idx]->ny;
  1405. }
  1406. clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
  1407. if (idx < 0 || idx >= (int)batch->entries.size()) {
  1408. LOG_ERR("%s: invalid index %d\n", __func__, idx);
  1409. return nullptr;
  1410. }
  1411. return batch->entries[idx].get();
  1412. }
  1413. void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
  1414. img->nx = nx;
  1415. img->ny = ny;
  1416. img->buf.resize(3 * nx * ny);
  1417. memcpy(img->buf.data(), rgb_pixels, img->buf.size());
  1418. }
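// expects tightly packed RGB8 data (3 bytes per pixel, row-major); the caller must
// provide at least 3 * nx * ny bytes in rgb_pixels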
  1419. bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
  1420. int nx, ny, nc;
  1421. auto * data = stbi_load(fname, &nx, &ny, &nc, 3);
  1422. if (!data) {
  1423. LOG_ERR("%s: failed to load image '%s'\n", __func__, fname);
  1424. return false;
  1425. }
  1426. clip_build_img_from_pixels(data, nx, ny, img);
  1427. stbi_image_free(data);
  1428. return true;
  1429. }
  1430. bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
  1431. int nx, ny, nc;
  1432. auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
  1433. if (!data) {
  1434. LOG_ERR("%s: failed to decode image bytes\n", __func__);
  1435. return false;
  1436. }
  1437. clip_build_img_from_pixels(data, nx, ny, img);
  1438. stbi_image_free(data);
  1439. return true;
  1440. }
  1441. // Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
  1442. static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
  1443. dst.nx = src.nx;
  1444. dst.ny = src.ny;
  1445. dst.buf.resize(src.buf.size());
  1446. // TODO @ngxson : seems like this could be done more efficiently on cgraph
  1447. for (size_t i = 0; i < src.buf.size(); ++i) {
  1448. int c = i % 3; // rgb
  1449. dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
  1450. }
  1451. }
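// worked example (illustrative constants): with mean = 0.5 and std = 0.5, a pixel value of
// 255 maps to (1.0 - 0.5) / 0.5 = 1.0 and a value of 0 maps to -1.0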
// set of tools to manipulate images
  1453. // in the future, we can have HW acceleration by allowing this struct to access 3rd party lib like imagick or opencv
  1454. struct image_manipulation {
  1455. // Bilinear resize function
  1456. static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int target_width, int target_height) {
  1457. dst.nx = target_width;
  1458. dst.ny = target_height;
  1459. dst.buf.resize(3 * target_width * target_height);
  1460. float x_ratio = static_cast<float>(src.nx - 1) / target_width;
  1461. float y_ratio = static_cast<float>(src.ny - 1) / target_height;
  1462. for (int y = 0; y < target_height; y++) {
  1463. for (int x = 0; x < target_width; x++) {
  1464. float px = x_ratio * x;
  1465. float py = y_ratio * y;
  1466. int x_floor = static_cast<int>(px);
  1467. int y_floor = static_cast<int>(py);
  1468. float x_lerp = px - x_floor;
  1469. float y_lerp = py - y_floor;
  1470. for (int c = 0; c < 3; c++) {
  1471. float top = lerp(
  1472. static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
  1473. static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
  1474. x_lerp
  1475. );
  1476. float bottom = lerp(
  1477. static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
  1478. static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
  1479. x_lerp
  1480. );
  1481. dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
  1482. }
  1483. }
  1484. }
  1485. }
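// for each destination pixel, the two horizontal neighbours of the top row and of the
// bottom row are lerp'd along x, then the two results are lerp'd along y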
  1486. // Bicubic resize function
// part of the image will be cropped if the aspect ratio differs
  1488. static bool bicubic_resize(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
  1489. const int nx = img.nx;
  1490. const int ny = img.ny;
  1491. dst.nx = target_width;
  1492. dst.ny = target_height;
  1493. dst.buf.resize(3 * target_width * target_height);
  1494. float Cc;
  1495. float C[5];
  1496. float d0, d2, d3, a0, a1, a2, a3;
  1497. int i, j, k, jj;
  1498. int x, y;
  1499. float dx, dy;
  1500. float tx, ty;
  1501. tx = (float)nx / (float)target_width;
  1502. ty = (float)ny / (float)target_height;
  1503. // Bicubic interpolation; adapted from ViT.cpp, inspired from :
  1504. // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
  1505. // -> https://en.wikipedia.org/wiki/Bicubic_interpolation
  1506. for (i = 0; i < target_height; i++) {
  1507. for (j = 0; j < target_width; j++) {
  1508. x = (int)(tx * j);
  1509. y = (int)(ty * i);
  1510. dx = tx * j - x;
  1511. dy = ty * i - y;
  1512. for (k = 0; k < 3; k++) {
  1513. for (jj = 0; jj <= 3; jj++) {
  1514. d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  1515. d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  1516. d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  1517. a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
  1518. a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
  1519. a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
  1520. a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
  1521. C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
  1522. d0 = C[0] - C[1];
  1523. d2 = C[2] - C[1];
  1524. d3 = C[3] - C[1];
  1525. a0 = C[1];
  1526. a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
  1527. a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
  1528. a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
  1529. Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;
  1530. const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
dst.buf[(i * target_width + j) * 3 + k] = Cc2;
  1532. }
  1533. }
  1534. }
  1535. }
  1536. return true;
  1537. }
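// for each destination pixel, a cubic polynomial is evaluated along x for each of the four
// neighbouring source rows (C[0..3]), then a second cubic interpolates those values along y;
// source indices are clamped at the image border via clip()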
  1538. // llava-1.6 type of resize_and_pad
  1539. // if the ratio is not 1:1, padding with pad_color will be applied
// pad_color is an RGB color (one value per channel); default is {0, 0, 0} (black)
  1541. static void resize_and_pad_image(const clip_image_u8 & image, clip_image_u8 & dst, const clip_image_size & target_resolution, std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
  1542. int target_width = target_resolution.width;
  1543. int target_height = target_resolution.height;
  1544. float scale_w = static_cast<float>(target_width) / image.nx;
  1545. float scale_h = static_cast<float>(target_height) / image.ny;
  1546. int new_width, new_height;
  1547. if (scale_w < scale_h) {
  1548. new_width = target_width;
  1549. new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
  1550. } else {
  1551. new_height = target_height;
  1552. new_width = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
  1553. }
  1554. clip_image_u8 resized_image;
  1555. bicubic_resize(image, resized_image, new_width, new_height);
  1556. clip_image_u8 padded_image;
  1557. padded_image.nx = target_width;
  1558. padded_image.ny = target_height;
  1559. padded_image.buf.resize(3 * target_width * target_height);
  1560. // Fill the padded image with the fill color
  1561. for (size_t i = 0; i < padded_image.buf.size(); i += 3) {
  1562. padded_image.buf[i] = pad_color[0];
  1563. padded_image.buf[i + 1] = pad_color[1];
  1564. padded_image.buf[i + 2] = pad_color[2];
  1565. }
  1566. // Calculate padding offsets
  1567. int pad_x = (target_width - new_width) / 2;
  1568. int pad_y = (target_height - new_height) / 2;
  1569. // Copy the resized image into the center of the padded buffer
  1570. for (int y = 0; y < new_height; ++y) {
  1571. for (int x = 0; x < new_width; ++x) {
  1572. for (int c = 0; c < 3; ++c) {
  1573. padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
  1574. }
  1575. }
  1576. }
  1577. dst = std::move(padded_image);
  1578. }
  1579. static void crop_image(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
  1580. dst.nx = w;
  1581. dst.ny = h;
  1582. dst.buf.resize(3 * w * h);
  1583. for (int i = 0; i < h; ++i) {
  1584. for (int j = 0; j < w; ++j) {
  1585. int src_idx = 3 * ((y + i)*image.nx + (x + j));
  1586. int dst_idx = 3 * (i*w + j);
  1587. dst.buf[dst_idx] = image.buf[src_idx];
  1588. dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
  1589. dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
  1590. }
  1591. }
  1592. }
  1593. private:
  1594. static inline int clip(int x, int lower, int upper) {
  1595. return std::max(lower, std::min(x, upper));
  1596. }
  1597. // Linear interpolation between two points
  1598. static inline float lerp(float s, float e, float t) {
  1599. return s + (e - s) * t;
  1600. }
  1601. };
  1602. /**
  1603. * implementation of LLaVA-UHD:
  1604. * - https://arxiv.org/pdf/2403.11703
  1605. * - https://github.com/thunlp/LLaVA-UHD
  1606. * - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
  1607. *
  1608. * overview:
* - an image always has a single overview (downscaled image)
  1610. * - an image can have 0 or multiple slices, depending on the image size
  1611. * - each slice can then be considered as a separate image
  1612. *
  1613. * for example:
  1614. *
  1615. * [overview] --> [slice 1] --> [slice 2]
  1616. * | |
  1617. * +--> [slice 3] --> [slice 4]
  1618. */
  1619. struct llava_uhd {
  1620. struct slice_coordinates {
  1621. int x;
  1622. int y;
  1623. clip_image_size size;
  1624. };
  1625. struct slice_instructions {
  1626. clip_image_size overview_size; // size of downscaled image
  1627. clip_image_size refined_size; // size of image right before slicing (must be multiple of slice size)
  1628. clip_image_size grid_size; // grid_size.width * grid_size.height = number of slices
  1629. std::vector<slice_coordinates> slices;
bool padding_refined = false; // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
  1631. };
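// illustrative example (minicpmv-style path, numbers are only a sketch): with slice_size = 448
// and a 1024x768 input, ratio ~ 3.9, so up to 4 slices are considered and get_best_grid()
// typically settles on a 2x2 grid; the downscaled overview is always produced in addition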
  1632. static int get_max_slices(struct clip_ctx * ctx) {
  1633. if (clip_is_minicpmv(ctx)) {
  1634. return 9;
  1635. }
  1636. return 0;
  1637. }
  1638. static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
  1639. slice_instructions res;
  1640. const int patch_size = clip_get_patch_size(ctx);
  1641. const int slice_size = clip_get_image_size(ctx);
  1642. const int max_slice_nums = get_max_slices(ctx);
  1643. const int original_width = original_size.width;
  1644. const int original_height = original_size.height;
  1645. const float log_ratio = log((float)original_width / original_height);
  1646. const float ratio = (float)original_width * original_height / (slice_size * slice_size);
  1647. const int multiple = fmin(ceil(ratio), max_slice_nums);
  1648. const bool has_slices = (multiple > 1);
  1649. const bool has_pinpoints = !ctx->vision_model.hparams.image_grid_pinpoints.empty();
  1650. if (has_pinpoints) {
  1651. // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
  1652. auto refine_size = llava_uhd::select_best_resolution(
  1653. ctx->vision_model.hparams.image_grid_pinpoints,
  1654. original_size);
  1655. res.overview_size = clip_image_size{slice_size, slice_size};
  1656. res.refined_size = refine_size;
  1657. res.grid_size = clip_image_size{0, 0};
  1658. res.padding_refined = true;
  1659. for (int y = 0; y < refine_size.height; y += slice_size) {
  1660. for (int x = 0; x < refine_size.width; x += slice_size) {
  1661. slice_coordinates slice;
  1662. slice.x = x;
  1663. slice.y = y;
  1664. slice.size.width = std::min(slice_size, refine_size.width - x);
  1665. slice.size.height = std::min(slice_size, refine_size.height - y);
  1666. res.slices.push_back(slice);
  1667. if (x == 0) {
  1668. res.grid_size.width++;
  1669. }
  1670. }
  1671. res.grid_size.height++;
  1672. }
  1673. return res;
  1674. }
  1675. // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)
  1676. auto best_size = get_best_resize(original_size, slice_size, patch_size, has_slices);
  1677. res.overview_size = best_size;
  1678. if (!has_slices) {
  1679. // skip slicing logic
  1680. res.refined_size = clip_image_size{0, 0};
  1681. res.grid_size = clip_image_size{0, 0};
  1682. } else {
  1683. auto best_grid = get_best_grid(max_slice_nums, multiple, log_ratio);
  1684. auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
  1685. res.grid_size = best_grid;
  1686. res.refined_size = refine_size;
  1687. int width = refine_size.width;
  1688. int height = refine_size.height;
  1689. int grid_x = int(width / best_grid.width);
  1690. int grid_y = int(height / best_grid.height);
  1691. for (int patches_y = 0, ic = 0;
  1692. patches_y < refine_size.height && ic < best_grid.height;
  1693. patches_y += grid_y, ic += 1) {
  1694. for (int patches_x = 0, jc = 0;
  1695. patches_x < refine_size.width && jc < best_grid.width;
  1696. patches_x += grid_x, jc += 1) {
  1697. slice_coordinates slice;
  1698. slice.x = patches_x;
  1699. slice.y = patches_y;
  1700. slice.size.width = grid_x;
  1701. slice.size.height = grid_y;
  1702. res.slices.push_back(slice);
  1703. // LOG_INF("slice %d: %d %d %d %d\n", ic, patches_i, patches_j, grid_x, grid_y);
  1704. }
  1705. }
  1706. }
  1707. return res;
  1708. }
  1709. static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
  1710. std::vector<clip_image_u8_ptr> output;
  1711. // resize to overview size
  1712. clip_image_u8_ptr resized_img(clip_image_u8_init());
  1713. image_manipulation::bicubic_resize(*img, *resized_img, inst.overview_size.width, inst.overview_size.height);
  1714. output.push_back(std::move(resized_img));
  1715. if (inst.slices.empty()) {
  1716. // no slices, just return the resized image
  1717. return output;
  1718. }
  1719. // resize to refined size
  1720. clip_image_u8_ptr refined_img(clip_image_u8_init());
  1721. if (inst.padding_refined) {
  1722. image_manipulation::resize_and_pad_image(*img, *refined_img, inst.refined_size);
  1723. } else {
  1724. image_manipulation::bilinear_resize(*img, *refined_img, inst.refined_size.width, inst.refined_size.height);
  1725. }
  1726. // create slices
  1727. for (const auto & slice : inst.slices) {
  1728. int x = slice.x;
  1729. int y = slice.y;
  1730. int w = slice.size.width;
  1731. int h = slice.size.height;
  1732. clip_image_u8_ptr img_slice(clip_image_u8_init());
  1733. image_manipulation::crop_image(*refined_img, *img_slice, x, y, w, h);
  1734. output.push_back(std::move(img_slice));
  1735. }
  1736. return output;
  1737. }
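// output layout: output[0] is the downscaled overview; any slices follow in row-major
// order (left to right, top to bottom)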
  1738. private:
  1739. static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
  1740. int width = original_size.width;
  1741. int height = original_size.height;
  1742. if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
  1743. float r = static_cast<float>(width) / height;
  1744. height = static_cast<int>(scale_resolution / std::sqrt(r));
  1745. width = static_cast<int>(height * r);
  1746. }
  1747. clip_image_size res;
  1748. res.width = ensure_divide(width, patch_size);
  1749. res.height = ensure_divide(height, patch_size);
  1750. return res;
  1751. }
  1752. /**
  1753. * Selects the best resolution from a list of possible resolutions based on the original size.
  1754. *
  1755. * @param original_size The original size of the image
  1756. * @param possible_resolutions A list of possible resolutions
  1757. * @return The best fit resolution
  1758. */
  1759. static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
  1760. int original_width = original_size.width;
  1761. int original_height = original_size.height;
  1762. clip_image_size best_fit;
  1763. int max_effective_resolution = 0;
  1764. int min_wasted_resolution = std::numeric_limits<int>::max();
  1765. for (const auto & resolution : possible_resolutions) {
  1766. int width = resolution.width;
  1767. int height = resolution.height;
  1768. float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
  1769. int downscaled_width = static_cast<int>(original_width * scale);
  1770. int downscaled_height = static_cast<int>(original_height * scale);
  1771. int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
  1772. int wasted_resolution = (width * height) - effective_resolution;
  1773. // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
  1774. if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
  1775. max_effective_resolution = effective_resolution;
  1776. min_wasted_resolution = wasted_resolution;
  1777. best_fit = resolution;
  1778. }
  1779. }
  1780. return best_fit;
  1781. }
  1782. // used by llava 1.6 with custom list of pinpoints
  1783. static clip_image_size select_best_resolution(const std::vector<int32_t> & pinpoints, const clip_image_size & original_size) {
  1784. std::vector<clip_image_size> possible_resolutions;
  1785. for (size_t i = 0; i < pinpoints.size(); i += 2) {
  1786. possible_resolutions.push_back(clip_image_size{pinpoints[i], pinpoints[i+1]});
  1787. }
  1788. return select_best_resolution(original_size, possible_resolutions);
  1789. }
  1790. static int ensure_divide(int length, int patch_size) {
  1791. return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
  1792. }
  1793. static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
  1794. int width = original_size.width;
  1795. int height = original_size.height;
  1796. int grid_x = grid.width;
  1797. int grid_y = grid.height;
  1798. int refine_width = ensure_divide(width, grid_x);
  1799. int refine_height = ensure_divide(height, grid_y);
  1800. clip_image_size grid_size;
  1801. grid_size.width = refine_width / grid_x;
  1802. grid_size.height = refine_height / grid_y;
  1803. auto best_grid_size = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
  1804. int best_grid_width = best_grid_size.width;
  1805. int best_grid_height = best_grid_size.height;
  1806. clip_image_size refine_size;
  1807. refine_size.width = best_grid_width * grid_x;
  1808. refine_size.height = best_grid_height * grid_y;
  1809. return refine_size;
  1810. }
  1811. static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
  1812. std::vector<int> candidate_split_grids_nums;
  1813. for (int i : {multiple - 1, multiple, multiple + 1}) {
  1814. if (i == 1 || i > max_slice_nums) {
  1815. continue;
  1816. }
  1817. candidate_split_grids_nums.push_back(i);
  1818. }
  1819. std::vector<clip_image_size> candidate_grids;
  1820. for (int split_grids_nums : candidate_split_grids_nums) {
  1821. int m = 1;
  1822. while (m <= split_grids_nums) {
  1823. if (split_grids_nums % m == 0) {
  1824. candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
  1825. }
  1826. ++m;
  1827. }
  1828. }
  1829. clip_image_size best_grid{1, 1};
  1830. float min_error = std::numeric_limits<float>::infinity();
  1831. for (const auto& grid : candidate_grids) {
  1832. float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
  1833. if (error < min_error) {
  1834. best_grid = grid;
  1835. min_error = error;
  1836. }
  1837. }
  1838. return best_grid;
  1839. }
  1840. };
// TODO @ngxson : deprecate the load_image_size singleton pattern
  1842. int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) {
  1843. const auto inst = llava_uhd::get_slice_instructions(ctx_clip, ctx_clip->load_image_size);
  1844. return inst.grid_size.width;
  1845. }
// returns the normalized float tensor for llava-1.5; for spatial_unpad with anyres processing (llava-1.6), it returns the normalized image patch tensors as a vector
  1847. // res_imgs memory is being allocated here, previous allocations will be freed if found
  1848. bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
  1849. if (!ctx->has_vision_encoder) {
  1850. LOG_ERR("%s: This gguf file seems to have no vision encoder\n", __func__);
  1851. return false;
  1852. }
  1853. clip_image_size original_size{img->nx, img->ny};
  1854. bool pad_to_square = true;
  1855. auto & params = ctx->vision_model.hparams;
  1856. // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing
  1857. if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) {
  1858. pad_to_square = false;
  1859. }
  1860. if (clip_is_minicpmv(ctx)) {
  1861. auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
  1862. std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
  1863. for (size_t i = 0; i < imgs.size(); ++i) {
  1864. // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
  1865. clip_image_f32_ptr res(clip_image_f32_init());
  1866. normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
  1867. res_imgs->entries.push_back(std::move(res));
  1868. }
  1869. return true;
  1870. }
  1871. else if (ctx->has_qwen2vl_merger) {
  1872. clip_image_u8 resized;
  1873. auto patch_size = clip_get_patch_size(ctx) * 2;
  1874. int nx = ceil((float)img->nx / patch_size) * patch_size;
  1875. int ny = ceil((float)img->ny / patch_size) * patch_size;
  1876. image_manipulation::bicubic_resize(*img, resized, nx, ny);
  1877. clip_image_f32_ptr img_f32(clip_image_f32_init());
  1878. // clip_image_f32_ptr res(clip_image_f32_init());
  1879. normalize_image_u8_to_f32(resized, *img_f32, ctx->image_mean, ctx->image_std);
  1880. // res_imgs->data[0] = *res;
  1881. res_imgs->entries.push_back(std::move(img_f32));
  1882. return true;
  1883. }
  1884. if (ctx->has_glm_projector || ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
  1885. clip_image_u8 resized_image;
  1886. int sz = params.image_size;
  1887. image_manipulation::bicubic_resize(*img, resized_image, sz, sz);
  1888. clip_image_f32_ptr img_f32(clip_image_f32_init());
  1889. //clip_image_save_to_bmp(resized_image, "resized.bmp");
  1890. normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
  1891. res_imgs->entries.push_back(std::move(img_f32));
  1892. return true;
  1893. }
  1894. // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
  1895. // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
  1896. clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily
  1897. if (pad_to_square) {
  1898. // for llava-1.5, we resize image to a square, and pad the shorter side with a background color
  1899. // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
  1900. const int longer_side = std::max(img->nx, img->ny);
  1901. temp->nx = longer_side;
  1902. temp->ny = longer_side;
  1903. temp->buf.resize(3 * longer_side * longer_side);
  1904. // background color in RGB from LLaVA (this is the mean rgb color * 255)
  1905. const std::array<uint8_t, 3> pad_color = {122, 116, 104};
  1906. // resize the image to the target_size
  1907. image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);
  1908. clip_image_f32_ptr res(clip_image_f32_init());
  1909. normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
  1910. res_imgs->entries.push_back(std::move(res));
  1911. return true;
  1912. } else if (!params.image_grid_pinpoints.empty()) {
  1913. // "spatial_unpad" with "anyres" processing for llava-1.6
  1914. auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
  1915. std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
  1916. for (size_t i = 0; i < imgs.size(); ++i) {
  1917. // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
  1918. clip_image_f32_ptr res(clip_image_f32_init());
  1919. normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
  1920. res_imgs->entries.push_back(std::move(res));
  1921. }
  1922. return true;
  1923. }
  1924. GGML_ASSERT(false && "Unknown image preprocessing type");
  1925. }
  1926. ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
  1927. return ctx->vision_model.image_newline;
  1928. }
  1929. void clip_free(clip_ctx * ctx) {
  1930. if (ctx == nullptr) {
  1931. return;
  1932. }
  1933. delete ctx;
  1934. }
  1935. size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
  1936. int extra_tokens = ctx->has_glm_projector ? 2 : 0;
  1937. return (clip_n_patches(ctx) + extra_tokens) * clip_n_mmproj_embd(ctx) * sizeof(float);
  1938. }
  1939. size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_h, int img_w) {
  1940. clip_image_f32 img;
  1941. img.nx = img_w;
  1942. img.ny = img_h;
  1943. return clip_n_patches_by_img(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
  1944. }
  1945. int32_t clip_get_image_size(const struct clip_ctx * ctx) {
  1946. return ctx->vision_model.hparams.image_size;
  1947. }
  1948. int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
  1949. return ctx->vision_model.hparams.patch_size;
  1950. }
  1951. int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
  1952. return ctx->vision_model.hparams.hidden_size;
  1953. }
  1954. const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
  1955. return ctx->vision_model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
  1956. }
  1957. const int32_t * clip_image_grid(const struct clip_ctx * ctx) {
  1958. if (ctx->vision_model.hparams.image_grid_pinpoints.size()) {
  1959. return &ctx->vision_model.hparams.image_grid_pinpoints.front();
  1960. }
  1961. return nullptr;
  1962. }
  1963. size_t get_clip_image_grid_size(const struct clip_ctx * ctx) {
  1964. return ctx->vision_model.hparams.image_grid_pinpoints.size();
  1965. }
  1966. int clip_n_patches(const struct clip_ctx * ctx) {
  1967. clip_image_f32 img;
  1968. img.nx = ctx->vision_model.hparams.image_size;
  1969. img.ny = ctx->vision_model.hparams.image_size;
  1970. return clip_n_patches_by_img(ctx, &img);
  1971. }
  1972. int clip_n_patches_by_img(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
  1973. const auto & params = ctx->vision_model.hparams;
  1974. int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
  1975. if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2 || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
  1976. n_patches /= 4;
  1977. } else if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
  1978. if (ctx->minicpmv_version == 2) {
  1979. n_patches = 96;
  1980. }
  1981. else if (ctx->minicpmv_version == 3) {
  1982. n_patches = 64;
  1983. }
  1984. else if (ctx->minicpmv_version == 4) {
  1985. n_patches = 64;
  1986. }
  1987. } else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) {
  1988. int patch_size = params.patch_size * 2;
  1989. int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
  1990. int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
  1991. n_patches = x_patch * y_patch;
  1992. } else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
  1993. n_patches = 256;
  1994. }
  1995. return n_patches;
  1996. }
  1997. static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
  1998. assert(embed_dim % 2 == 0);
  1999. int H = pos.size();
  2000. int W = pos[0].size();
  2001. std::vector<float> omega(embed_dim / 2);
  2002. for (int i = 0; i < embed_dim / 2; ++i) {
  2003. omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
  2004. }
  2005. std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
  2006. for (int h = 0; h < H; ++h) {
  2007. for (int w = 0; w < W; ++w) {
  2008. for (int d = 0; d < embed_dim / 2; ++d) {
  2009. float out_value = pos[h][w] * omega[d];
  2010. emb[h][w][d] = sin(out_value);
  2011. emb[h][w][d + embed_dim / 2] = cos(out_value);
  2012. }
  2013. }
  2014. }
  2015. return emb;
  2016. }
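// standard sinusoidal embedding: emb[h][w][d] = sin(pos * omega_d) and
// emb[h][w][d + D/2] = cos(pos * omega_d), with omega_d = 1 / 10000^(d / (D/2))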
  2017. static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
  2018. assert(embed_dim % 2 == 0);
  2019. std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
  2020. std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)
  2021. int H = emb_h.size();
  2022. int W = emb_h[0].size();
  2023. std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
  2024. for (int h = 0; h < H; ++h) {
  2025. for (int w = 0; w < W; ++w) {
  2026. for (int d = 0; d < embed_dim / 2; ++d) {
  2027. emb[h][w][d] = emb_h[h][w][d];
  2028. emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
  2029. }
  2030. }
  2031. }
  2032. return emb;
  2033. }

// full 2D sin/cos positional embedding for a grid of image_size.first x
// image_size.second positions, flattened to (H*W) x embed_dim
static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
    int grid_h_size = image_size.first;
    int grid_w_size = image_size.second;

    std::vector<float> grid_h(grid_h_size);
    std::vector<float> grid_w(grid_w_size);

    for (int i = 0; i < grid_h_size; ++i) {
        grid_h[i] = static_cast<float>(i);
    }
    for (int i = 0; i < grid_w_size; ++i) {
        grid_w[i] = static_cast<float>(i);
    }

    std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid[h][w] = grid_w[w];
        }
    }

    std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid_2d[0][h][w] = grid_h[h];
            grid_2d[1][h][w] = grid_w[w];
        }
    }

    std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);

    int H = image_size.first;
    int W = image_size.second;
    std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
        }
    }

    return pos_embed_2d;
}
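
// Illustrative example (tiny sizes chosen for clarity): with embed_dim = 4 and
// image_size = {2, 2}, each grid cell (h, w) receives the vector
// [sin(h), cos(h), sin(w), cos(w)], and the result is flattened in
// column-major order (index w * H + h), matching the final loop above.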

bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    if (!ctx->has_vision_encoder) {
        LOG_ERR("%s: This gguf file seems to have no vision encoder\n", __func__);
        return false;
    }

    // wrap the single image into a batch of size 1
    clip_image_f32_batch imgs;
    clip_image_f32_ptr img_copy(clip_image_f32_init());
    *img_copy = *img;
    imgs.entries.push_back(std::move(img_copy));

    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}
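
// Minimal caller-side sketch (illustrative only, not part of this file):
// assumes `ctx` comes from clip_init() and `img` already holds a preprocessed
// f32 image; the output buffer is sized with clip_n_patches_by_img() and
// clip_n_mmproj_embd(), both defined in this file.
//
//   const int n_patches = clip_n_patches_by_img(ctx, img);
//   const int n_embd    = clip_n_mmproj_embd(ctx);
//   std::vector<float> embd((size_t) n_patches * n_embd);
//   if (!clip_image_encode(ctx, /*n_threads =*/ 4, img, embd.data())) {
//       // handle encoding failure
//   }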

bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
    const clip_image_f32_batch & imgs = *imgs_c_ptr;

    if (!ctx->has_vision_encoder) {
        LOG_ERR("%s: This gguf file seems to have no vision encoder\n", __func__);
        return false;
    }

    int batch_size = imgs.entries.size();

    if (ctx->has_llava_projector) {
        GGML_ASSERT(batch_size == 1); // TODO: support multiple images
    }
    if (ctx->has_minicpmv_projector) {
        GGML_ASSERT(batch_size == 1);
    }
    if (ctx->has_glm_projector) {
        GGML_ASSERT(batch_size == 1);
        // write the boi (begin-of-image) embedding first and advance the output pointer past it
        ggml_tensor * boi = ctx->vision_model.boi_w;
        ggml_backend_tensor_get(boi, vec, 0, ggml_nbytes(boi));
        vec = vec + ggml_nelements(boi); // offset for boi
    }

    // build the inference graph
    ggml_backend_sched_reset(ctx->sched.get());
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true);
    ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);

    // set inputs
    const auto & model   = ctx->vision_model;
    const auto & hparams = model.hparams;

    const int image_size  = hparams.image_size;
    int image_size_width  = image_size;
    int image_size_height = image_size;
    if (ctx->has_minicpmv_projector || ctx->has_qwen2vl_merger) {
        image_size_width  = imgs.entries[0]->nx;
        image_size_height = imgs.entries[0]->ny;
    }

    const int patch_size    = hparams.patch_size;
    const int num_patches   = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int num_positions = num_patches + (model.class_embedding ? 1 : 0);
    const int pos_w         = ctx->load_image_size.width  / patch_size;
    const int pos_h         = ctx->load_image_size.height / patch_size;

    {
        // set the raw image pixels; the input tensor uses a planar (channel-major) layout
        struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");
        float * data = (float *)malloc(ggml_nbytes(inp_raw));

        for (size_t i = 0; i < imgs.entries.size(); i++) {
            const int nx = imgs.entries[i]->nx;
            const int ny = imgs.entries[i]->ny;
            if (!(ctx->has_minicpmv_projector || ctx->has_qwen2vl_merger)) {
                GGML_ASSERT(nx == image_size && ny == image_size);
            }

            const int n = nx * ny;

            for (int b = 0; b < batch_size; b++) {
                for (int k = 0; k < 3; k++) {
                    for (int y = 0; y < ny; y++) {
                        for (int x = 0; x < nx; x++) {
                            data[(b * 3 * n) + k * n + y * nx + x] = imgs.entries[b]->buf[3 * (y * nx + x) + k];
                        }
                    }
                }
            }
        }
        ggml_backend_tensor_set(inp_raw, data, 0, ggml_nbytes(inp_raw));
        free(data);
    }

    if (ctx->has_minicpmv_projector) {
        {
            // bucketed 2D position ids, inspired from siglip:
            //   https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
            //   https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
            int * positions_data = (int *)malloc(ggml_nbytes(positions));

            int bucket_coords_h[1024];
            int bucket_coords_w[1024];
            for (int i = 0; i < pos_h; i++) {
                bucket_coords_h[i] = std::floor(70.0 * i / pos_h);
            }
            for (int i = 0; i < pos_w; i++) {
                bucket_coords_w[i] = std::floor(70.0 * i / pos_w);
            }
            for (int i = 0, id = 0; i < pos_h; i++) {
                for (int j = 0; j < pos_w; j++) {
                    positions_data[id++] = bucket_coords_h[i] * 70 + bucket_coords_w[j];
                }
            }
            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
            free(positions_data);
        }

        {
            // 2D sin/cos positional embedding, inspired from the resampler of Qwen-VL:
            //   https://huggingface.co/Qwen/Qwen-VL/tree/main
            //   https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
            struct ggml_tensor * pos_embed = ggml_graph_get_tensor(gf, "pos_embed");
            int embed_dim = 4096;
            if (ctx->minicpmv_version == 2) {
                embed_dim = 4096;
            } else if (ctx->minicpmv_version == 3) {
                embed_dim = 3584;
            } else if (ctx->minicpmv_version == 4) {
                embed_dim = 3584;
            }
            auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));

            float * pos_embed_data = (float *)malloc(ggml_nbytes(pos_embed));
            for (int i = 0; i < pos_w * pos_h; ++i) {
                for (int j = 0; j < embed_dim; ++j) {
                    pos_embed_data[i * embed_dim + j] = pos_embed_t[i][j];
                }
            }

            ggml_backend_tensor_set(pos_embed, pos_embed_data, 0, ggml_nbytes(pos_embed));
            free(pos_embed_data);
        }
    }
    else {
        if (model.class_embedding) {
            // zero out the "embeddings" input tensor
            struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");

            void * zero_mem = malloc(ggml_nbytes(embeddings));
            memset(zero_mem, 0, ggml_nbytes(embeddings));
            ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings));
            free(zero_mem);
        }

        if (ctx->has_qwen2vl_merger) {
            // position ids for the qwen2vl merger: patches are visited in 2x2 blocks and each
            // patch's (y, x) coordinates are duplicated across the four position channels
            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");

            const int pw = image_size_width  / patch_size;
            const int ph = image_size_height / patch_size;
            int * positions_data = (int *)malloc(ggml_nbytes(positions));

            int ptr = 0;
            for (int y = 0; y < ph; y += 2) {
                for (int x = 0; x < pw; x += 2) {
                    for (int dy = 0; dy < 2; dy++) {
                        for (int dx = 0; dx < 2; dx++) {
                            positions_data[ptr]                   = y + dy;
                            positions_data[num_patches + ptr]     = x + dx;
                            positions_data[num_patches * 2 + ptr] = y + dy;
                            positions_data[num_patches * 3 + ptr] = x + dx;
                            ptr++;
                        }
                    }
                }
            }

            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
            free(positions_data);
        }
        else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
            // do nothing
        }
        else {
            // default case: sequential position ids
            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");

            int * positions_data = (int *)malloc(ggml_nbytes(positions));
            for (int i = 0; i < num_positions; i++) {
                positions_data[i] = i;
            }
            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
            free(positions_data);

            if (!ctx->has_glm_projector) {
                struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
                // The patches vector is used to get rows to index into the embeds with;
                // we should skip dim 0 only if we have CLS to avoid going out of bounds
                // when retrieving the rows.
                int patch_offset = model.class_embedding ? 1 : 0;
                int * patches_data = (int *)malloc(ggml_nbytes(patches));
                for (int i = 0; i < num_patches; i++) {
                    patches_data[i] = i + patch_offset;
                }
                ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
                free(patches_data);
            }
        }
    }

    ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);

    auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
        return false;
    }

    // the last node is the embedding tensor
    struct ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    if (ctx->has_glm_projector) {
        // append the eoi (end-of-image) embedding after the image embeddings
        ggml_tensor * eoi = ctx->vision_model.eoi_w;
        int offset = ggml_nelements(embeddings);
        ggml_backend_tensor_get(eoi, vec + offset, 0, ggml_nbytes(eoi));
    }

    return true;
}
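
// Note on the GLM path above: when has_glm_projector is set, `vec` receives the
// boi embedding first (the output pointer is advanced past it before the image
// embeddings are written) and the eoi embedding is appended after them, so the
// caller must size the destination buffer for the boi and eoi rows in addition
// to the image embeddings.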

bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) {
    assert(itype < GGML_TYPE_COUNT);
    ggml_type type = static_cast<ggml_type>(itype);

    auto * ctx_clip = clip_init(fname_inp, clip_context_params{
        /* use_gpu */   false,
        /* verbosity */ GGML_LOG_LEVEL_ERROR,
    });

    const auto & ctx_src  = ctx_clip->ctx_gguf.get();
    const auto & ctx_data = ctx_clip->ctx_data.get();

    auto * ctx_out = gguf_init_empty();
    gguf_set_kv(ctx_out, ctx_src);
    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
    gguf_set_val_u32(ctx_out, "general.file_type", itype);

    auto fout = std::ofstream(fname_out, std::ios::binary);

    const int n_tensors = gguf_get_n_tensors(ctx_src);

    for (int i = 0; i < n_tensors; ++i) {
        const char * name = gguf_get_tensor_name(ctx_src, i);
        struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
        gguf_add_tensor(ctx_out, cur);
    }

    // reserve space for the metadata header; it is rewritten with the final
    // tensor types and offsets at the end
    const size_t meta_size = gguf_get_meta_size(ctx_out);
    for (size_t i = 0; i < meta_size; ++i) {
        fout.put(0);
    }

    // regexes of tensor names to be quantized
    const std::vector<std::string> k_names = {
        ".*weight",
    };

    std::vector<uint8_t> work(512);
    std::vector<float> conv_buf(512);
    size_t total_size_org = 0;
    size_t total_size_new = 0;

    for (int i = 0; i < n_tensors; ++i) {
        const std::string name = gguf_get_tensor_name(ctx_src, i);
        struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str());

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;

        bool quantize = false;
        for (const auto & s : k_names) {
            if (std::regex_match(name, std::regex(s))) {
                quantize = true;
                break;
            }
        }

        // quantize only 2D tensors that are larger than the block size
        quantize &= (ggml_n_dims(cur) == 2) && cur->ne[0] > ggml_blck_size(type);

        if (quantize) {
            new_type = type;
            if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) {
                new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type
                // LOG_ERR("%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type));
            }
            const size_t n_elms = ggml_nelements(cur);
            float * f32_data;
            switch (cur->type) {
                case GGML_TYPE_F32:
                    f32_data = (float *)cur->data;
                    break;
                case GGML_TYPE_F16:
                    if (conv_buf.size() < n_elms) {
                        conv_buf.resize(n_elms);
                    }
                    for (size_t j = 0; j < n_elms; ++j) {
                        conv_buf[j] = ggml_fp16_to_fp32(((ggml_fp16_t *)cur->data)[j]);
                    }
                    f32_data = (float *)conv_buf.data();
                    break;
                default:
                    LOG_ERR("%s: Please use an input file in f32 or f16\n", __func__);
                    gguf_free(ctx_out);
                    return false;
            }
            if (work.size() < n_elms * 4) {
                work.resize(n_elms * 4);
            }
            new_data = work.data();
            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms / cur->ne[0], cur->ne[0], nullptr);
        } else {
            new_type = cur->type;
            new_data = cur->data;
            new_size = ggml_nbytes(cur);
        }

        const size_t orig_size = ggml_nbytes(cur);
        total_size_org += orig_size;
        total_size_new += new_size;
        gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
        GGML_ASSERT(gguf_get_tensor_size(ctx_out, gguf_find_tensor(ctx_out, name.c_str())) == new_size);
        gguf_set_tensor_data(ctx_out, name.c_str(), new_data);
        fout.write((const char *)new_data, new_size);
        size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
        for (size_t j = 0; j < pad; ++j) {
            fout.put(0);
        }

        LOG_INF("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
                orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
    }

    // go back to beginning of file and write the updated metadata
    fout.seekp(0, std::ios::beg);
    std::vector<uint8_t> meta(meta_size);
    gguf_get_meta_data(ctx_out, meta.data());
    fout.write((const char *)meta.data(), meta_size);

    fout.close();

    clip_free(ctx_clip);
    gguf_free(ctx_out);

    {
        LOG_INF("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
        LOG_INF("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
    }

    return true;
}
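
// Illustrative usage sketch (the file names here are hypothetical); `itype` is
// interpreted directly as a ggml_type, e.g. GGML_TYPE_Q4_0:
//
//   if (!clip_model_quantize("mmproj-model-f16.gguf", "mmproj-model-q4_0.gguf", GGML_TYPE_Q4_0)) {
//       // handle quantization failure
//   }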

// dimensionality of the embeddings produced by the multimodal projector
int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
        return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
        return ctx->vision_model.mm_model_peg_0_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
        return ctx->vision_model.mm_2_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
        return ctx->vision_model.mm_3_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
        if (ctx->minicpmv_version == 2) {
            return 4096;
        } else if (ctx->minicpmv_version == 3) {
            return 3584;
        } else if (ctx->minicpmv_version == 4) {
            return 3584;
        }
    }
    if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
        return ctx->vision_model.mm_model_mlp_3_w->ne[1];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_MERGER) {
        return ctx->vision_model.mm_1_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
        return ctx->vision_model.mm_input_proj_w->ne[0];
    }

    std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type];
    throw std::runtime_error(string_format("%s: unsupported projector type: %s\n", __func__, proj_type.c_str()));
}

int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->has_minicpmv_projector) {
        return ctx->minicpmv_version;
    }
    return 0;
}

bool clip_is_glm(const struct clip_ctx * ctx) {
    return ctx->has_glm_projector;
}

bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
    return ctx->has_qwen2vl_merger;
}

bool clip_is_llava(const struct clip_ctx * ctx) {
    return ctx->has_llava_projector;
}

bool clip_is_gemma3(const struct clip_ctx * ctx) {
    return ctx->proj_type == PROJECTOR_TYPE_GEMMA3;
}

// Determine the number of encoder layers to iterate over
int get_deepest_feature_layer(const struct clip_ctx * ctx) {
    // Get the index of the second to last layer; this is the
    // default for models that have a llava projector
    const auto & hparams = ctx->vision_model.hparams;
    int n_layer = hparams.n_layer - 1;
    int deepest_feature_layer = -1;

    // Handle other projectors; incrementing here indicates that we
    // should use the last encoder layer for the vision features.
    if (ctx->has_minicpmv_projector || ctx->has_glm_projector || ctx->has_qwen2vl_merger) {
        n_layer += 1;
    }

    // If we set explicit vision feature layers, only go up to the deepest one
    for (const auto & feature_layer : hparams.vision_feature_layer) {
        if (feature_layer > deepest_feature_layer) {
            deepest_feature_layer = feature_layer;
        }
    }
    return deepest_feature_layer < 0 ? n_layer : deepest_feature_layer;
}

// encode an image given as a flat float buffer (h*w*3 values, copied verbatim
// into the pixel-interleaved clip_image_f32 buffer)
bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
    clip_image_f32 clip_img;
    clip_img.buf.resize(h * w * 3);

    for (int i = 0; i < h * w * 3; i++) {
        clip_img.buf[i] = img[i];
    }

    clip_img.nx = w;
    clip_img.ny = h;

    clip_image_encode(ctx, n_threads, &clip_img, vec);

    return true;
}

//
// API used internally with mtmd
//

projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
    return ctx->proj_type;
}