// clip.cpp
#include "clip.h"
#include "clip-impl.h"
#include "clip-model.h"
#include "clip-graph.h"
#include "models/models.h"
#include "ggml.h"
#include "ggml-cpp.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "gguf.h"

#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <stdexcept>
#include <unordered_set>
#include <vector>
#include <cinttypes>
#include <limits>
#include <array>
#include <functional>

struct clip_logger_state g_logger_state = {clip_log_callback_default, NULL};

//#define CLIP_DEBUG_FUNCTIONS

#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    // Write pixel data
    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
    }

    file.close();
}

static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;

    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',    // Signature
        0,0,0,0,    // Image file size in bytes
        0,0,0,0,    // Reserved
        54,0,0,0    // Start of pixel array
    };

    // Total file size
    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0,   // Size of this header (40 bytes)
        0,0,0,0,    // Image width
        0,0,0,0,    // Image height
        1,0,        // Number of color planes
        24,0,       // Bits per pixel
        0,0,0,0,    // No compression
        0,0,0,0,    // Image size (can be 0 for no compression)
        0,0,0,0,    // X pixels per meter (not specified)
        0,0,0,0,    // Y pixels per meter (not specified)
        0,0,0,0,    // Total colors (color table not used)
        0,0,0,0     // Important colors (all are important)
    };

    // Width and height in the information header
    infoHeader[4]  = (unsigned char)(img.nx);
    infoHeader[5]  = (unsigned char)(img.nx >> 8);
    infoHeader[6]  = (unsigned char)(img.nx >> 16);
    infoHeader[7]  = (unsigned char)(img.nx >> 24);
    infoHeader[8]  = (unsigned char)(img.ny);
    infoHeader[9]  = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));

    // Pixel data
    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            // Each pixel
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
                img.buf[pixelIndex]
            };
            file.write(reinterpret_cast<char*>(pixel), 3);
        }
        // Write padding for the row
        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
    }

    file.close();
}
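
// Worked example of the row padding above (illustrative numbers, not from this file):
// BMP requires each pixel row to start on a 4-byte boundary. For a 3-channel image
// with nx = 5, widthInBytes = 5 * 3 = 15, paddingAmount = (4 - 15 % 4) % 4 = 1 and
// stride = 16, so one zero byte is appended per row and fileSize = 54 + 16 * ny.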
// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(3 * src.nx * src.ny);
    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
    }
}
#endif
struct clip_ctx {
    clip_model model;

    gguf_context_ptr ctx_gguf;
    ggml_context_ptr ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    std::vector<ggml_backend_t> backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    ggml_backend_t backend     = nullptr;
    ggml_backend_t backend_cpu = nullptr;
    ggml_backend_buffer_ptr buf;

    int max_nodes = 8192;
    ggml_backend_sched_ptr sched;

    clip_flash_attn_type flash_attn_type = CLIP_FLASH_ATTN_TYPE_AUTO;
    bool is_allocated = false;

    // for debugging
    bool debug_graph = false;
    std::vector<ggml_tensor *> debug_print_tensors;

    clip_ctx(clip_context_params & ctx_params) {
        flash_attn_type = ctx_params.flash_attn_type;
        debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr;
        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (!backend_cpu) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        if (ctx_params.use_gpu) {
            auto backend_name = std::getenv("MTMD_BACKEND_DEVICE");
            if (backend_name != nullptr) {
                backend = ggml_backend_init_by_name(backend_name, nullptr);
                if (!backend) {
                    LOG_WRN("%s: Warning: Failed to initialize \"%s\" backend, falling back to default GPU backend\n", __func__, backend_name);
                }
            }
            if (!backend) {
                backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
                backend = backend ? backend : ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr);
            }
        }

        if (backend) {
            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
            backend_ptrs.push_back(backend);
            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
        } else {
            backend = backend_cpu;
            LOG_INF("%s: CLIP using CPU backend\n", __func__);
        }

        if (ctx_params.image_min_tokens > 0) {
            model.hparams.custom_image_min_tokens = ctx_params.image_min_tokens;
        }
        if (ctx_params.image_max_tokens > 0) {
            model.hparams.custom_image_max_tokens = ctx_params.image_max_tokens;
        }

        backend_ptrs.push_back(backend_cpu);
        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));

        sched.reset(
            ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false, true)
        );
    }

    ~clip_ctx() {
        ggml_backend_free(backend);
        if (backend != backend_cpu) {
            ggml_backend_free(backend_cpu);
        }
    }

    // this function is added so that we don't change too much of the existing code
    projector_type proj_type() const {
        return model.proj_type;
    }
};
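
// Note on backend selection (a summary of the constructor above): the CPU backend is
// always initialized; when use_gpu is set, the backend named by the MTMD_BACKEND_DEVICE
// environment variable (if set) is tried first, then the default GPU backend, then an
// integrated GPU, and finally the CPU backend as a fallback. The assumed usage is
// something like:
//   MTMD_BACKEND_DEVICE=<ggml-backend-name> MTMD_DEBUG_GRAPH=1 ./llama-mtmd-cli ...
// (the exact invocation here is illustrative, not taken from this file)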
//
// clip_graph
//

clip_graph::clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
        model(ctx->model),
        hparams(model.hparams),
        proj_type(ctx->proj_type()),
        img(img),
        patch_size(hparams.patch_size),
        n_patches_x(img.nx / patch_size),
        n_patches_y(img.ny / patch_size),
        n_patches(n_patches_x * n_patches_y),
        n_embd(hparams.n_embd),
        n_head(hparams.n_head),
        d_head(n_embd / n_head),
        n_layer(hparams.n_layer),
        n_mmproj_embd(clip_n_mmproj_embd(ctx)),
        eps(hparams.eps),
        kq_scale(1.0f / sqrtf((float)d_head)),
        flash_attn_type(ctx->flash_attn_type),
        debug_graph(ctx->debug_graph),
        debug_print_tensors(ctx->debug_print_tensors) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ ctx->buf_compute_meta.size(),
        /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
        /*.no_alloc   =*/ true,
    };
    ctx0_ptr.reset(ggml_init(params));
    ctx0 = ctx0_ptr.get();
    gf = ggml_new_graph_custom(ctx0, ctx->max_nodes, false);
}

void clip_graph::cb(ggml_tensor * cur0, const char * name, int il) const {
    if (debug_graph) {
        ggml_tensor * cur = ggml_cpy(ctx0, cur0, ggml_dup_tensor(ctx0, cur0));
        std::string cur_name = il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
        ggml_set_name(cur, cur_name.c_str());
        ggml_set_output(cur);
        ggml_build_forward_expand(gf, cur);
        debug_print_tensors.push_back(cur);
    }
}
// siglip2 naflex
ggml_tensor * clip_graph::resize_position_embeddings() {
    ggml_tensor * pos_embd = model.position_embeddings;
    GGML_ASSERT(pos_embd); // must hold before we read its shape below

    const int height     = img.ny / patch_size;
    const int width      = img.nx / patch_size;
    const uint32_t mode  = GGML_SCALE_MODE_BILINEAR | GGML_SCALE_FLAG_ANTIALIAS;
    const int n_per_side = (int)std::sqrt(pos_embd->ne[1]);

    if (height == n_per_side && width == n_per_side) {
        return pos_embd;
    }

    pos_embd = ggml_reshape_3d(ctx0, pos_embd, n_embd, n_per_side, n_per_side);  // -> (n_embd, n_per_side, n_per_side)
    pos_embd = ggml_permute(ctx0, pos_embd, 2, 0, 1, 3);                         // -> (n_per_side, n_per_side, n_embd)
    pos_embd = ggml_interpolate(ctx0, pos_embd, width, height, n_embd, 1, mode); // -> (width, height, n_embd)
    pos_embd = ggml_permute(ctx0, pos_embd, 1, 2, 0, 3);                         // -> (n_embd, width, height)
    pos_embd = ggml_cont_2d(ctx0, pos_embd, n_embd, width * height);             // -> (n_embd, width * height)

    return pos_embd;
}
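
// Illustrative walkthrough of the resize above (assumed numbers): with n_embd = 768,
// a 27x27 learned grid (n_per_side = 27, pos_embd->ne[1] = 729) and a 448x448 input
// with patch_size = 14, width = height = 32, so the (768, 729) table is reshaped to
// (768, 27, 27), bilinearly resized to a 32x32 grid, and flattened back to
// (768, 1024): each of the 32*32 patches gets an interpolated position vector.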
// build vision transformer (ViT) cgraph
// this function should cover most of the models
// if your model has specific features, you should probably duplicate this function
ggml_tensor * clip_graph::build_vit(
        ggml_tensor * inp,
        int64_t n_pos,
        norm_type norm_t,
        ffn_op_type ffn_t,
        ggml_tensor * learned_pos_embd,
        std::function<ggml_tensor *(ggml_tensor *, const clip_layer &)> add_pos
    ) {
    if (learned_pos_embd) {
        inp = ggml_add(ctx0, inp, learned_pos_embd);
        cb(inp, "pos_embed", -1);
    }

    ggml_tensor * inpL = inp;

    // pre-layernorm
    if (model.pre_ln_w) {
        inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
        cb(inpL, "pre_ln", -1);
    }

    // loop over layers
    for (int il = 0; il < n_layer; il++) {
        auto & layer = model.layers[il];
        ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

        // layernorm1
        cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
        cb(cur, "layer_inp_normed", il);

        // self-attention
        {
            ggml_tensor * Qcur = nullptr;
            ggml_tensor * Kcur = nullptr;
            ggml_tensor * Vcur = nullptr;

            if (layer.qkv_w != nullptr) {
                // fused qkv
                cur = ggml_mul_mat(ctx0, layer.qkv_w, cur);
                if (layer.qkv_b != nullptr) {
                    cur = ggml_add(ctx0, cur, layer.qkv_b);
                }
                Qcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ 0);
                Kcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ ggml_row_size(cur->type, n_embd));
                Vcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ ggml_row_size(cur->type, 2 * n_embd));
                // TODO: q/k norm requires row size == n_embd, while here it's d_head
                // we can add support in the future if needed
                GGML_ASSERT(layer.q_norm == nullptr && layer.k_norm == nullptr);
            } else {
                // separate q, k, v
                Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
                if (layer.q_b) {
                    Qcur = ggml_add(ctx0, Qcur, layer.q_b);
                }
                Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
                if (layer.k_b) {
                    Kcur = ggml_add(ctx0, Kcur, layer.k_b);
                }
                Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
                if (layer.v_b) {
                    Vcur = ggml_add(ctx0, Vcur, layer.v_b);
                }

                if (layer.q_norm) {
                    Qcur = build_norm(Qcur, layer.q_norm, NULL, norm_t, eps, il);
                    cb(Qcur, "Qcur_norm", il);
                }
                if (layer.k_norm) {
                    Kcur = build_norm(Kcur, layer.k_norm, NULL, norm_t, eps, il);
                    cb(Kcur, "Kcur_norm", il);
                }

                Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
                Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
                Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);
            }

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            if (add_pos) {
                Qcur = add_pos(Qcur, layer);
                Kcur = add_pos(Kcur, layer);
                cb(Qcur, "Qcur_pos", il);
                cb(Kcur, "Kcur_pos", il);
            }

            cur = build_attn(layer.o_w, layer.o_b,
                Qcur, Kcur, Vcur, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);
        }

        if (layer.ls_1_w) {
            cur = ggml_mul(ctx0, cur, layer.ls_1_w);
            cb(cur, "attn_out_scaled", il);
        }

        // re-add the layer input, i.e., the residual connection
        cur = ggml_add(ctx0, cur, inpL);

        inpL = cur; // inpL = residual, cur = hidden_states

        cb(cur, "ffn_inp", il);

        // layernorm2
        cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
        cb(cur, "ffn_inp_normed", il);

        // ffn
        cur = build_ffn(cur,
            layer.ff_up_w, layer.ff_up_b,
            layer.ff_gate_w, layer.ff_gate_b,
            layer.ff_down_w, layer.ff_down_b,
            ffn_t, il);
        cb(cur, "ffn_out", il);

        if (layer.ls_2_w) {
            cur = ggml_mul(ctx0, cur, layer.ls_2_w);
            cb(cur, "ffn_out_scaled", il);
        }

        // residual 2
        cur = ggml_add(ctx0, inpL, cur);
        cb(cur, "layer_out", il);

        inpL = cur;
    }

    if (model.audio_has_avgpool()) {
        ggml_tensor * cur = inpL;
        cur = ggml_transpose(ctx0, cur);
        cur = ggml_cont(ctx0, cur);
        cur = ggml_pool_1d(ctx0, cur, GGML_OP_POOL_AVG, 2, 2, 0);
        cur = ggml_transpose(ctx0, cur);
        cur = ggml_cont(ctx0, cur);
        inpL = cur;
    }

    // post-layernorm
    if (model.post_ln_w) {
        inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
    }
    return inpL;
}
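
// The loop above is a standard pre-LN transformer block; schematically, per layer:
//   x = x + ls_1 * attn(ln_1(x))
//   x = x + ls_2 * ffn(ln_2(x))
// where the layer-scale factors ls_1/ls_2 and the q/k norms are optional and only
// applied when the corresponding tensors are present in the model.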
// build the input after conv2d (inp_raw --> patches)
// returns tensor with shape [n_embd, n_patches]
ggml_tensor * clip_graph::build_inp() {
    ggml_tensor * inp_raw = build_inp_raw();
    ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
    inp = ggml_reshape_2d(ctx0, inp, n_patches, n_embd);
    inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp));
    if (model.patch_bias) {
        inp = ggml_add(ctx0, inp, model.patch_bias);
        cb(inp, "patch_bias", -1);
    }
    return inp;
}

ggml_tensor * clip_graph::build_inp_raw(int channels) {
    ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, channels);
    ggml_set_name(inp_raw, "inp_raw");
    ggml_set_input(inp_raw);
    return inp_raw;
}

ggml_tensor * clip_graph::build_norm(
        ggml_tensor * cur,
        ggml_tensor * mw,
        ggml_tensor * mb,
        norm_type type,
        float norm_eps,
        int il) const {
    cur = type == NORM_TYPE_RMS
        ? ggml_rms_norm(ctx0, cur, norm_eps)
        : ggml_norm(ctx0, cur, norm_eps);

    if (mw || mb) {
        cb(cur, "norm", il);
    }

    if (mw) {
        cur = ggml_mul(ctx0, cur, mw);
        if (mb) {
            cb(cur, "norm_w", il);
        }
    }

    if (mb) {
        cur = ggml_add(ctx0, cur, mb);
    }

    return cur;
}
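
// For reference, the two normalizations dispatched above, per row x of length n,
// with the optional affine weight w and bias b applied afterwards:
//   layernorm: y = (x - mean(x)) / sqrt(var(x) + eps) * w + b
//   rmsnorm:   y = x / sqrt(mean(x^2) + eps) * w + b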
ggml_tensor * clip_graph::build_ffn(
        ggml_tensor * cur,
        ggml_tensor * up,
        ggml_tensor * up_b,
        ggml_tensor * gate,
        ggml_tensor * gate_b,
        ggml_tensor * down,
        ggml_tensor * down_b,
        ffn_op_type type_op,
        int il) const {
    ggml_tensor * tmp = up ? ggml_mul_mat(ctx0, up, cur) : cur;
    cb(tmp, "ffn_up", il);

    if (up_b) {
        tmp = ggml_add(ctx0, tmp, up_b);
        cb(tmp, "ffn_up_b", il);
    }

    if (gate) {
        cur = ggml_mul_mat(ctx0, gate, cur);
        cb(cur, "ffn_gate", il);
        if (gate_b) {
            cur = ggml_add(ctx0, cur, gate_b);
            cb(cur, "ffn_gate_b", il);
        }
    } else {
        cur = tmp;
    }

    // we only support parallel ffn for now
    switch (type_op) {
        case FFN_SILU:
            if (gate) {
                cur = ggml_swiglu_split(ctx0, cur, tmp);
                cb(cur, "ffn_swiglu", il);
            } else {
                cur = ggml_silu(ctx0, cur);
                cb(cur, "ffn_silu", il);
            } break;
        case FFN_GELU:
            if (gate) {
                cur = ggml_geglu_split(ctx0, cur, tmp);
                cb(cur, "ffn_geglu", il);
            } else {
                cur = ggml_gelu(ctx0, cur);
                cb(cur, "ffn_gelu", il);
            } break;
        case FFN_GELU_ERF:
            if (gate) {
                cur = ggml_geglu_erf_split(ctx0, cur, tmp);
                cb(cur, "ffn_geglu_erf", il);
            } else {
                cur = ggml_gelu_erf(ctx0, cur);
                cb(cur, "ffn_gelu_erf", il);
            } break;
        case FFN_GELU_QUICK:
            if (gate) {
                cur = ggml_geglu_quick_split(ctx0, cur, tmp);
                cb(cur, "ffn_geglu_quick", il);
            } else {
                cur = ggml_gelu_quick(ctx0, cur);
                cb(cur, "ffn_gelu_quick", il);
            } break;
    }

    if (down) {
        cur = ggml_mul_mat(ctx0, down, cur);
    }

    if (down_b) {
        cb(cur, "ffn_down", il);
        cur = ggml_add(ctx0, cur, down_b);
    }

    return cur;
}
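
// Example of the gated path above: with a gate tensor present and FFN_SILU,
// ggml_swiglu_split(ctx0, cur, tmp) computes, in effect, silu(gate(x)) * up(x)
// elementwise (the "parallel" SwiGLU form), which is then projected back down
// by the down matrix; the non-gated path is a plain two-matrix MLP.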
ggml_tensor * clip_graph::build_attn(
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_mask,
        float kq_scale,
        int il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
    //cb(q, "q", il);

    ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
    //cb(k, "k", il);

    ggml_tensor * cur;

    if (flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) {
        ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3);

        k = ggml_cast(ctx0, k, GGML_TYPE_F16);
        v = ggml_cast(ctx0, v, GGML_TYPE_F16);

        cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, 0.0f, 0.0f);
        ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);

        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
    } else {
        ggml_tensor * v = ggml_permute(ctx0, v_cur, 1, 2, 0, 3);
        v = ggml_cont(ctx0, v);

        const auto n_tokens = q->ne[1];
        const auto n_head   = q->ne[2];

        ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
        // F32 may not be needed for vision encoders?
        // ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

        kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, 0.0f);

        ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
        cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
        cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
    }

    cb(cur, "kqv_out", il);

    if (wo) {
        cur = ggml_mul_mat(ctx0, wo, cur);
    }
    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
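
// Shape walkthrough for the non-flash path above (ggml lists the fastest-varying
// dimension first): q/k enter as (d_head, n_head, n_pos) and are permuted to
// (d_head, n_pos, n_head); kq = k^T q is (n_pos, n_pos, n_head); after the scaled
// softmax, kqv = v kq is (d_head, n_pos, n_head), which is permuted and flattened
// back to (d_head*n_head, n_pos) = (n_embd, n_pos) before the output projection.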
// implementation of the 2D RoPE without adding a new op in ggml
// this is not efficient (it uses double the memory), but it works on all backends
// TODO: there was a more efficient implementation which relied on ggml_view and ggml_rope_ext_inplace, but the rope inplace does not work well with non-contiguous tensors; we should fix that and revert to the original implementation in https://github.com/ggml-org/llama.cpp/pull/13065
ggml_tensor * clip_graph::build_rope_2d(
        ggml_context * ctx0,
        ggml_tensor * cur,
        ggml_tensor * pos_a, // first half
        ggml_tensor * pos_b, // second half
        const float freq_base,
        const bool interleave_freq
    ) {
    const int64_t n_dim  = cur->ne[0];
    const int64_t n_head = cur->ne[1];
    const int64_t n_pos  = cur->ne[2];

    // for example, if we have cur tensor of shape (n_dim=8, n_head, n_pos)
    // we will have a list of 4 inv_freq: 1e-0, 1e-1, 1e-2, 1e-3
    // first half of cur will use 1e-0, 1e-2 (even)
    // second half of cur will use 1e-1, 1e-3 (odd)
    // the trick here is to rotate just half of n_dim, so inv_freq will automatically be even
    // ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
    // then for the second half, we use freq_scale to shift the inv_freq
    // ^ why? replace (2i) with (2i+1) in the above equation
    const float freq_scale_odd = interleave_freq
        ? std::pow(freq_base, (float)-2/n_dim)
        : 1.0;

    // first half
    ggml_tensor * first;
    {
        first = ggml_view_3d(ctx0, cur,
            n_dim/2, n_head, n_pos,
            ggml_row_size(cur->type, n_dim),
            ggml_row_size(cur->type, n_dim*n_head),
            0);
        first = ggml_rope_ext(
            ctx0,
            first,
            pos_a,   // positions
            nullptr, // freq factors
            n_dim/2, // n_dims
            0, 0, freq_base,
            1.0f, 0.0f, 1.0f, 0.0f, 0.0f
        );
    }

    // second half
    ggml_tensor * second;
    {
        second = ggml_view_3d(ctx0, cur,
            n_dim/2, n_head, n_pos,
            ggml_row_size(cur->type, n_dim),
            ggml_row_size(cur->type, n_dim*n_head),
            n_dim/2 * ggml_element_size(cur));
        second = ggml_rope_ext(
            ctx0,
            second,
            pos_b,   // positions
            nullptr, // freq factors
            n_dim/2, // n_dims
            0, 0, freq_base,
            freq_scale_odd,
            0.0f, 1.0f, 0.0f, 0.0f
        );
    }

    cur = ggml_concat(ctx0, first, second, 0);
    return cur;
}
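
// Worked example of the frequency trick above, with n_dim = 8 and freq_base = 10000:
// full RoPE over 8 dims uses inv_freq_i = freq_base^(-2i/8) for i = 0..3. Rotating
// only n_dim/2 = 4 dims gives freq_base^(-2i/4) = freq_base^(-4i/8), i.e. exactly
// the even-indexed frequencies; scaling by freq_scale_odd = freq_base^(-2/8) shifts
// every exponent by one step, producing the odd-indexed frequencies for the second half.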
// Generic function to stack frames for audio processing
// Abstracts out the StackAudioFrames logic used by ultravox
ggml_tensor * clip_graph::build_stack(ggml_tensor * cur, int32_t stack_factor, int32_t n_embed) {
    if (stack_factor <= 1) {
        return cur;
    }

    int64_t total_elements = ggml_nelements(cur);
    int64_t stride = n_embed * stack_factor;

    // Calculate padded length
    int64_t padded_len = GGML_PAD(total_elements, stride);
    int64_t pad = padded_len - total_elements;

    if (pad > 0) {
        // Pad the tensor to make it divisible by stride
        cur = ggml_view_1d(ctx0, cur, total_elements, 0);
        cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
    }

    // Reshape to [stride, padded_len / stride]
    cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
        ggml_row_size(cur->type, stride), 0);
    return cur;
}
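
// Worked example of the stacking above (illustrative numbers): with n_embed = 1280,
// stack_factor = 8 and 1500 input frames, total_elements = 1500 * 1280 = 1,920,000
// and stride = 10,240, so padded_len = GGML_PAD(1,920,000, 10,240) = 1,925,120,
// pad = 5,120 zero elements, and the result is viewed as [10,240, 188]: every 8
// consecutive frames are concatenated into one wider frame.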
// aka pixel_shuffle / pixel_unshuffle / patch_merger (Kimi-VL)
// support dynamic resolution
ggml_tensor * clip_graph::build_patch_merge_permute(ggml_tensor * cur, int scale_factor) {
    GGML_ASSERT(scale_factor > 1);

    const int n_embd = cur->ne[0];
    int width  = img.nx / patch_size;
    int height = img.ny / patch_size;

    // pad width and height to factor
    const int64_t pad_width  = CLIP_ALIGN(width,  scale_factor) - width;
    const int64_t pad_height = CLIP_ALIGN(height, scale_factor) - height;
    cur = ggml_reshape_3d(ctx0, cur, n_embd, width, height);
    if (pad_width || pad_height) {
        cur     = ggml_pad(ctx0, cur, 0, pad_width, pad_height, 0);
        width  += pad_width;
        height += pad_height;
    }

    // unshuffle h
    cur = ggml_reshape_3d(ctx0, cur, n_embd * scale_factor, width / scale_factor, height);
    cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

    // unshuffle w
    cur = ggml_cont_3d(ctx0, cur, n_embd * scale_factor * scale_factor, height / scale_factor, width / scale_factor);
    cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);

    cur = ggml_cont_2d(ctx0, cur, cur->ne[0], cur->ne[1] * cur->ne[2]);
    cb(cur, "pixel_shuffle", -1);
    return cur;
}
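
// Shape sketch of the unshuffle above, assuming n_embd = 1024, a 28x28 patch grid
// and scale_factor = 2: (1024, 28, 28) -> (2048, 14, 28) -> permute -> (2048, 28, 14)
// -> (4096, 14, 14) -> flatten -> (4096, 196), i.e. each 2x2 block of patches is
// merged into a single token with 4x the embedding size.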
static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
    GGML_ASSERT(imgs.entries.size() == 1 && "n_batch > 1 is not supported");
    const clip_image_f32 & img = *imgs.entries[0];

    std::unique_ptr<clip_graph> builder;
    switch (ctx->proj_type()) {
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_JANUS_PRO:
            {
                builder = std::make_unique<clip_graph_siglip>(ctx, img);
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                builder = std::make_unique<clip_graph_pixtral>(ctx, img);
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
            {
                builder = std::make_unique<clip_graph_qwen2vl>(ctx, img);
            } break;
        case PROJECTOR_TYPE_QWEN3VL:
            {
                builder = std::make_unique<clip_graph_qwen3vl>(ctx, img);
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                builder = std::make_unique<clip_graph_minicpmv>(ctx, img);
            } break;
        case PROJECTOR_TYPE_INTERNVL:
            {
                builder = std::make_unique<clip_graph_internvl>(ctx, img);
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                builder = std::make_unique<clip_graph_llama4>(ctx, img);
            } break;
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_QWEN2A:
        case PROJECTOR_TYPE_GLMA:
            {
                builder = std::make_unique<clip_graph_whisper_enc>(ctx, img);
            } break;
        case PROJECTOR_TYPE_KIMIVL:
            {
                builder = std::make_unique<clip_graph_kimivl>(ctx, img);
            } break;
        case PROJECTOR_TYPE_COGVLM:
            {
                builder = std::make_unique<clip_graph_cogvlm>(ctx, img);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                builder = std::make_unique<clip_graph_llava>(ctx, img);
            } break;
        default:
            GGML_ABORT("missing cgraph builder");
    }

    return builder->build();
}
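
// To wire up a new projector type, the pattern above suggests (a sketch, not the
// full checklist): add a PROJECTOR_TYPE_* enum value, a clip_graph_* subclass
// implementing build(), and a case in this switch, e.g.:
//   case PROJECTOR_TYPE_MYMODEL: // hypothetical
//       {
//           builder = std::make_unique<clip_graph_mymodel>(ctx, img);
//       } break;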
//
// clip_model_loader
//

struct clip_model_loader {
    ggml_context_ptr ctx_meta;
    gguf_context_ptr ctx_gguf;

    std::string fname;

    size_t model_size = 0; // in bytes

    bool has_vision = false;
    bool has_audio  = false;

    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_model
    clip_model_loader(const char * fname) : fname(fname) {
        struct ggml_context * meta = nullptr;

        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &meta,
        };

        ctx_gguf = gguf_context_ptr(gguf_init_from_file(fname, params));
        if (!ctx_gguf.get()) {
            throw std::runtime_error(string_format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
        }

        ctx_meta.reset(meta);

        const int n_tensors = gguf_get_n_tensors(ctx_gguf.get());

        // print gguf info
        {
            std::string name;
            get_string(KEY_NAME, name, false);
            std::string description;
            get_string(KEY_DESCRIPTION, description, false);
            LOG_INF("%s: model name: %s\n", __func__, name.c_str());
            LOG_INF("%s: description: %s\n", __func__, description.c_str());
            LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx_gguf.get()));
            LOG_INF("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx_gguf.get()));
            LOG_INF("%s: n_tensors: %d\n", __func__, n_tensors);
            LOG_INF("%s: n_kv: %d\n", __func__, (int)gguf_get_n_kv(ctx_gguf.get()));
            LOG_INF("\n");
        }

        // modalities
        {
            get_bool(KEY_HAS_VISION_ENC, has_vision, false);
            get_bool(KEY_HAS_AUDIO_ENC,  has_audio,  false);
            if (has_vision) {
                LOG_INF("%s: has vision encoder\n", __func__);
            }
            if (has_audio) {
                LOG_INF("%s: has audio encoder\n", __func__);
            }
        }

        // tensors
        {
            for (int i = 0; i < n_tensors; ++i) {
                const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
                const size_t offset = gguf_get_tensor_offset(ctx_gguf.get(), i);
                enum ggml_type type = gguf_get_tensor_type(ctx_gguf.get(), i);
                ggml_tensor * cur = ggml_get_tensor(meta, name);
                size_t tensor_size = ggml_nbytes(cur);
                model_size += tensor_size;
                LOG_DBG("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
                    __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }
    void load_hparams(clip_model & model, clip_modality modality) {
        auto & hparams = model.hparams;
        std::string log_ffn_op; // for logging

        // sanity check
        if (modality == CLIP_MODALITY_VISION) {
            GGML_ASSERT(has_vision);
        } else if (modality == CLIP_MODALITY_AUDIO) {
            GGML_ASSERT(has_audio);
        }
        model.modality = modality;

        // projector type
        std::string proj_type;
        {
            // default key
            get_string(KEY_PROJ_TYPE, proj_type, false);

            // for models with mixed modalities
            if (proj_type.empty()) {
                if (modality == CLIP_MODALITY_VISION) {
                    get_string(KEY_VISION_PROJ_TYPE, proj_type, false);
                } else if (modality == CLIP_MODALITY_AUDIO) {
                    get_string(KEY_AUDIO_PROJ_TYPE, proj_type, false);
                } else {
                    GGML_ABORT("unknown modality");
                }
            }

            model.proj_type = clip_projector_type_from_string(proj_type);
            if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) {
                throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
            }

            // correct arch for multimodal models (legacy method)
            if (model.proj_type == PROJECTOR_TYPE_QWEN25O) {
                model.proj_type = modality == CLIP_MODALITY_VISION
                    ? PROJECTOR_TYPE_QWEN25VL
                    : PROJECTOR_TYPE_QWEN2A;
            }
        }

        const bool is_vision = model.modality == CLIP_MODALITY_VISION;
        const bool is_audio  = model.modality == CLIP_MODALITY_AUDIO;

        // other hparams
        {
            const char * prefix = is_vision ? "vision" : "audio";
            get_u32(string_format(KEY_N_EMBD,         prefix), hparams.n_embd);
            get_u32(string_format(KEY_N_HEAD,         prefix), hparams.n_head);
            get_u32(string_format(KEY_N_FF,           prefix), hparams.n_ff);
            get_u32(string_format(KEY_N_BLOCK,        prefix), hparams.n_layer);
            get_u32(string_format(KEY_PROJ_DIM,       prefix), hparams.projection_dim);
            get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps);

            if (is_vision) {
                get_u32(KEY_IMAGE_SIZE, hparams.image_size);
                get_u32(KEY_PATCH_SIZE, hparams.patch_size);
                get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
                get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy
                get_u32(KEY_MINICPMV_QUERY_NUM, hparams.minicpmv_query_num, false);
                if (hparams.minicpmv_query_num == 0) {
                    // fall back to hardcoded values for legacy models
                    // (versions 3 through 6 use 64 queries, everything else 96)
                    hparams.minicpmv_query_num = (hparams.minicpmv_version >= 3 && hparams.minicpmv_version <= 6) ? 64 : 96;
                }
            } else if (is_audio) {
                get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins);
                // some hparams are unused, but they still need to be set to avoid issues
                hparams.image_size = 0;
                hparams.patch_size = 1;
            } else {
                GGML_ASSERT(false && "unknown modality");
            }

            // for pinpoints, we need to convert it into a list of resolution candidates
            {
                std::vector<int> pinpoints;
                get_arr_int(KEY_IMAGE_GRID_PINPOINTS, pinpoints, false);
                if (!pinpoints.empty()) {
                    for (size_t i = 0; i < pinpoints.size(); i += 2) {
                        hparams.image_res_candidates.push_back({
                            pinpoints[i],
                            pinpoints[i+1],
                        });
                    }
                }
            }

            // default warmup value
            hparams.warmup_image_size = hparams.image_size;

            hparams.has_llava_projector = model.proj_type == PROJECTOR_TYPE_MLP
                                       || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                                       || model.proj_type == PROJECTOR_TYPE_LDP
                                       || model.proj_type == PROJECTOR_TYPE_LDPV2;

            {
                bool use_gelu = false;
                bool use_silu = false;
                get_bool(KEY_USE_GELU, use_gelu, false);
                get_bool(KEY_USE_SILU, use_silu, false);
                if (use_gelu && use_silu) {
                    throw std::runtime_error(string_format("%s: both use_gelu and use_silu are set to true\n", __func__));
                }
                if (use_gelu) {
                    hparams.ffn_op = FFN_GELU;
                    log_ffn_op = "gelu";
                } else if (use_silu) {
                    hparams.ffn_op = FFN_SILU;
                    log_ffn_op = "silu";
                } else {
                    hparams.ffn_op = FFN_GELU_QUICK;
                    log_ffn_op = "gelu_quick";
                }
            }

            {
                std::string mm_patch_merge_type;
                get_string(KEY_MM_PATCH_MERGE_TYPE, mm_patch_merge_type, false);
                if (mm_patch_merge_type == "spatial_unpad") {
                    hparams.mm_patch_merge_type = PATCH_MERGE_SPATIAL_UNPAD;
                }
            }
            if (is_vision) {
                int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
                int idx_std  = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
                GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
                GGML_ASSERT(idx_std >= 0  && "image_std not found");
                const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
                const float * std_data  = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
                for (int i = 0; i < 3; ++i) {
                    hparams.image_mean[i] = mean_data[i];
                    hparams.image_std[i]  = std_data[i];
                }
            }

            // Load the vision feature layer indices if they are explicitly provided;
            // if multiple vision feature layers are present, the values will be concatenated
            // to form the final visual features.
            // NOTE: gguf conversions should standardize the values of the vision feature layer to
            // be non-negative, since we use -1 to mark values as unset here.
            std::vector<int> vision_feature_layer;
            get_arr_int(KEY_FEATURE_LAYER, vision_feature_layer, false);
            // convert std::vector to std::unordered_set
            for (auto & layer : vision_feature_layer) {
                hparams.vision_feature_layer.insert(layer);
            }

            // model-specific params
            switch (model.proj_type) {
                case PROJECTOR_TYPE_MINICPMV:
                    {
                        if (hparams.minicpmv_version == 0) {
                            hparams.minicpmv_version = 2; // default to 2 if not set
                        }
                    } break;
                case PROJECTOR_TYPE_INTERNVL:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                    } break;
                case PROJECTOR_TYPE_IDEFICS3:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        get_u32(KEY_PREPROC_IMAGE_SIZE, hparams.image_longest_edge, false);
                    } break;
                case PROJECTOR_TYPE_LFM2:
                    {
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        // ref: https://huggingface.co/LiquidAI/LFM2-VL-3B/blob/main/preprocessor_config.json
                        // the config above specifies the number of tokens after downsampling,
                        // while here it is the number before; relax the lower bound to 64
                        hparams.set_limit_image_tokens(64, 1024);
                    } break;
                case PROJECTOR_TYPE_PIXTRAL:
                case PROJECTOR_TYPE_LIGHTONOCR:
                    {
                        // ref: https://huggingface.co/mistral-community/pixtral-12b/blob/main/preprocessor_config.json
                        // TODO: verify the image_min_tokens
                        hparams.n_merge = 1; // the original pixtral does not use patch merging
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.n_merge, false);
                        hparams.set_limit_image_tokens(8, 1024);
                        hparams.set_warmup_n_tokens(256); // avoid OOM on warmup
                    } break;
                case PROJECTOR_TYPE_KIMIVL:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        // TODO: check kimivl preprocessor for exact values
                        hparams.set_limit_image_tokens(8, 1024);
                        hparams.set_warmup_n_tokens(256); // avoid OOM on warmup
                    } break;
                case PROJECTOR_TYPE_GEMMA3:
                    {
                        // default value (used by all model sizes in gemma 3 family)
                        // number of patches for each **side** is reduced by a factor of 4
                        hparams.n_merge = 4;
                        // test model (tinygemma3) has a different value, we optionally read it
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                    } break;
                case PROJECTOR_TYPE_QWEN2VL:
                case PROJECTOR_TYPE_QWEN25VL:
                case PROJECTOR_TYPE_QWEN3VL:
                    {
                        hparams.n_merge = 2; // default value for Qwen 2 and 2.5
                        get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.n_merge, false);
                        get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern, model.proj_type == PROJECTOR_TYPE_QWEN25VL); // only 2.5 requires it
                        // ref: https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/blob/main/preprocessor_config.json
                        hparams.set_limit_image_tokens(8, 4096);
                        hparams.set_warmup_n_tokens(46*46); // avoid OOM on warmup
                        const int warn_min_pixels = 1024 * hparams.n_merge * hparams.n_merge * hparams.patch_size * hparams.patch_size;
                        if (hparams.image_min_pixels < warn_min_pixels) {
                            LOG_WRN("%s: Qwen-VL models require at minimum 1024 image tokens to function correctly on grounding tasks\n", __func__);
                            LOG_WRN("%s: if you encounter problems with accuracy, try adding --image-min-tokens 1024\n", __func__);
                            LOG_WRN("%s: more info: https://github.com/ggml-org/llama.cpp/issues/16842\n\n", __func__);
                        }
                    } break;
                case PROJECTOR_TYPE_LLAMA4:
                    {
                        hparams.rope_theta = 10000.0f;
                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.n_merge, false);
                        set_llava_uhd_res_candidates(model, 3);
                    } break;
                case PROJECTOR_TYPE_ULTRAVOX:
                case PROJECTOR_TYPE_QWEN2A:
                case PROJECTOR_TYPE_GLMA:
                case PROJECTOR_TYPE_VOXTRAL:
                    {
                        bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX ||
                                             model.proj_type == PROJECTOR_TYPE_VOXTRAL ||
                                             model.proj_type == PROJECTOR_TYPE_GLMA;
                        get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
                        if (hparams.n_mel_bins != 128) {
                            throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
                        }
                        hparams.ffn_op = FFN_GELU_ERF;
                        log_ffn_op = "gelu_erf"; // temporary solution for logging
                    } break;
                default:
                    break;
            }

            // sanity check
            {
                if (hparams.image_max_pixels < hparams.image_min_pixels) {
                    throw std::runtime_error(string_format("%s: image_max_pixels (%d) is less than image_min_pixels (%d)\n", __func__, hparams.image_max_pixels, hparams.image_min_pixels));
                }
            }

            LOG_INF("%s: projector: %s\n", __func__, proj_type.c_str());
            LOG_INF("%s: n_embd: %d\n", __func__, hparams.n_embd);
            LOG_INF("%s: n_head: %d\n", __func__, hparams.n_head);
            LOG_INF("%s: n_ff: %d\n", __func__, hparams.n_ff);
            LOG_INF("%s: n_layer: %d\n", __func__, hparams.n_layer);
            LOG_INF("%s: ffn_op: %s\n", __func__, log_ffn_op.c_str());
            LOG_INF("%s: projection_dim: %d\n", __func__, hparams.projection_dim);
            if (is_vision) {
                LOG_INF("\n--- vision hparams ---\n");
                LOG_INF("%s: image_size: %d\n", __func__, hparams.image_size);
                LOG_INF("%s: patch_size: %d\n", __func__, hparams.patch_size);
                LOG_INF("%s: has_llava_proj: %d\n", __func__, hparams.has_llava_projector);
                LOG_INF("%s: minicpmv_version: %d\n", __func__, hparams.minicpmv_version);
                LOG_INF("%s: n_merge: %d\n", __func__, hparams.n_merge);
                LOG_INF("%s: n_wa_pattern: %d\n", __func__, hparams.n_wa_pattern);
                if (hparams.image_min_pixels > 0) {
                    LOG_INF("%s: image_min_pixels: %d%s\n", __func__, hparams.image_min_pixels, hparams.custom_image_min_tokens > 0 ? " (custom value)" : "");
                }
                if (hparams.image_max_pixels > 0) {
                    LOG_INF("%s: image_max_pixels: %d%s\n", __func__, hparams.image_max_pixels, hparams.custom_image_max_tokens > 0 ? " (custom value)" : "");
                }
            } else if (is_audio) {
                LOG_INF("\n--- audio hparams ---\n");
                LOG_INF("%s: n_mel_bins: %d\n", __func__, hparams.n_mel_bins);
                LOG_INF("%s: proj_stack_factor: %d\n", __func__, hparams.proj_stack_factor);
            }
            LOG_INF("\n");
            LOG_INF("%s: model size: %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size: %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
        }
    }
  1057. void load_tensors(clip_ctx & ctx_clip) {
  1058. auto & model = ctx_clip.model;
  1059. auto & hparams = model.hparams;
  1060. std::map<std::string, size_t> tensor_offset;
  1061. std::vector<ggml_tensor *> tensors_to_load;
  1062. // TODO @ngxson : support both audio and video in the future
  1063. const char * prefix = model.modality == CLIP_MODALITY_AUDIO ? "a" : "v";
  1064. // get offsets
  1065. for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
  1066. const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
  1067. tensor_offset[name] = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), i);
  1068. }
  1069. // create data context
  1070. struct ggml_init_params params = {
  1071. /*.mem_size =*/ static_cast<size_t>(gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
  1072. /*.mem_buffer =*/ NULL,
  1073. /*.no_alloc =*/ true,
  1074. };
  1075. ctx_clip.ctx_data.reset(ggml_init(params));
  1076. if (!ctx_clip.ctx_data) {
  1077. throw std::runtime_error(string_format("%s: failed to init ggml context\n", __func__));
  1078. }
  1079. // helper function
        auto get_tensor = [&](const std::string & name, bool required = true) {
            ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
            if (!cur && required) {
                throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
            }
            if (cur) {
                tensors_to_load.push_back(cur);
                // add tensors to context
                ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
                ggml_set_name(data_tensor, cur->name);
                cur = data_tensor;
            }
            return cur;
        };

        model.class_embedding = get_tensor(TN_CLASS_EMBD, false);
        model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false);
        model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, prefix, "bias"), false);
        model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false);
        model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"), false);
        model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
        model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false);
        model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);
        model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false);

        // layers
        model.layers.resize(hparams.n_layer);
        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = model.layers[il];
            layer.k_w = get_tensor(string_format(TN_ATTN_K, prefix, il, "weight"), false);
            layer.q_w = get_tensor(string_format(TN_ATTN_Q, prefix, il, "weight"), false);
            layer.v_w = get_tensor(string_format(TN_ATTN_V, prefix, il, "weight"), false);
            layer.o_w = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "weight"));
            layer.qkv_w = get_tensor(string_format(TN_ATTN_QKV, prefix, il, "weight"), false);
            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, prefix, il, "weight"), false);
            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, prefix, il, "weight"), false);
            layer.ln_1_w = get_tensor(string_format(TN_LN_1, prefix, il, "weight"), false);
            layer.ln_2_w = get_tensor(string_format(TN_LN_2, prefix, il, "weight"), false);
            layer.ls_1_w = get_tensor(string_format(TN_LS_1, prefix, il, "weight"), false); // no bias
            layer.ls_2_w = get_tensor(string_format(TN_LS_2, prefix, il, "weight"), false); // no bias
            layer.k_b = get_tensor(string_format(TN_ATTN_K, prefix, il, "bias"), false);
            layer.q_b = get_tensor(string_format(TN_ATTN_Q, prefix, il, "bias"), false);
            layer.v_b = get_tensor(string_format(TN_ATTN_V, prefix, il, "bias"), false);
            layer.o_b = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "bias"), false);
            layer.qkv_b = get_tensor(string_format(TN_ATTN_QKV, prefix, il, "bias"), false);
            layer.ln_1_b = get_tensor(string_format(TN_LN_1, prefix, il, "bias"), false);
            layer.ln_2_b = get_tensor(string_format(TN_LN_2, prefix, il, "bias"), false);

            // ffn
            layer.ff_up_w = get_tensor(string_format(TN_FFN_UP, prefix, il, "weight"));
            layer.ff_up_b = get_tensor(string_format(TN_FFN_UP, prefix, il, "bias"), false);
            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, prefix, il, "weight"), false);
            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, prefix, il, "bias"), false);
            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "weight"));
            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "bias"), false);

            // qwen3vl deepstack layer
            layer.deepstack_norm_w = get_tensor(string_format(TN_DEEPSTACK_NORM, il, "weight"), false);
            layer.deepstack_norm_b = get_tensor(string_format(TN_DEEPSTACK_NORM, il, "bias"), false);
            layer.deepstack_fc1_w = get_tensor(string_format(TN_DEEPSTACK_FC1, il, "weight"), false);
            layer.deepstack_fc1_b = get_tensor(string_format(TN_DEEPSTACK_FC1, il, "bias"), false);
            layer.deepstack_fc2_w = get_tensor(string_format(TN_DEEPSTACK_FC2, il, "weight"), false);
            layer.deepstack_fc2_b = get_tensor(string_format(TN_DEEPSTACK_FC2, il, "bias"), false);
            if (layer.has_deepstack()) {
                model.n_deepstack_layers++;
            }

            // some models were exported with legacy (incorrect) naming, which is quite messy; fix it here
            // note: Qwen models converted with the old surgery script have n_ff = 0, so we cannot use n_ff for this check!
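            // instead, rely on shapes: a correctly exported ff_down_w has n_ff inputs,
            // so if its input dim (ne[0]) equals n_embd, up/down were swapped at export time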
            bool is_ffn_swapped = (
                // only old models need this fix
                model.proj_type == PROJECTOR_TYPE_MLP
                || model.proj_type == PROJECTOR_TYPE_MLP_NORM
                || model.proj_type == PROJECTOR_TYPE_LDP
                || model.proj_type == PROJECTOR_TYPE_LDPV2
                || model.proj_type == PROJECTOR_TYPE_QWEN2VL
                || model.proj_type == PROJECTOR_TYPE_QWEN25VL
                || model.proj_type == PROJECTOR_TYPE_GLM_EDGE
                || model.proj_type == PROJECTOR_TYPE_GEMMA3
                || model.proj_type == PROJECTOR_TYPE_IDEFICS3
                || model.proj_type == PROJECTOR_TYPE_MINICPMV
            ) && layer.ff_up_w && layer.ff_down_w && layer.ff_down_w->ne[0] == hparams.n_embd;
            if (is_ffn_swapped) {
                // swap up and down weights
                ggml_tensor * tmp = layer.ff_up_w;
                layer.ff_up_w = layer.ff_down_w;
                layer.ff_down_w = tmp;
                // swap up and down biases
                tmp = layer.ff_up_b;
                layer.ff_up_b = layer.ff_down_b;
                layer.ff_down_b = tmp;
                if (il == 0) {
                    LOG_WRN("%s: ffn up/down are swapped\n", __func__);
                }
            }
        }

        switch (model.proj_type) {
            case PROJECTOR_TYPE_MLP:
            case PROJECTOR_TYPE_MLP_NORM:
                {
                    // LLaVA projection
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
                    // Yi-type llava
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    // missing in Yi-type llava
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // Yi-type llava
                    model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
                    model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
                    model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
                    model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
                    if (model.mm_3_w) {
                        // TODO: this is a hack to support Yi-type llava
                        model.proj_type = PROJECTOR_TYPE_MLP_NORM;
                    }
                    model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
                } break;
            case PROJECTOR_TYPE_LDP:
                {
                    // MobileVLM projection
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                    model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
                    model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
                    model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
                    model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
                    model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
                    model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
                    model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
                    model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
                    model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
                    model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
                    model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
                    model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
                    model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
                    model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
                    model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
                    model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
                    model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
                    model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
                    model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
                    model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
                } break;
            case PROJECTOR_TYPE_LDPV2:
                {
                    // MobileVLM v2 projection
                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                    model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
                    model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
                    model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
                } break;
            case PROJECTOR_TYPE_MINICPMV:
                {
                    // model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
                    model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
                    model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
                    model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
                    model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
                    model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
                    model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
                    model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
                    model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
                    model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
                    model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
                    model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
                    model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
                    model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
                    model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
                    model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
                    model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
                    model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
                    model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
                } break;
            case PROJECTOR_TYPE_GLM_EDGE:
                {
                    model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                    model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
                    model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
                    model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
                    model.mm_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
                    model.mm_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2VL:
            case PROJECTOR_TYPE_QWEN25VL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_QWEN3VL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_GEMMA3:
                {
                    model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
                    model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
                } break;
            case PROJECTOR_TYPE_IDEFICS3:
                {
                    model.projection = get_tensor(TN_MM_PROJECTOR);
                } break;
            case PROJECTOR_TYPE_LFM2:
            case PROJECTOR_TYPE_KIMIVL:
                {
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM);
                    model.mm_input_norm_b = get_tensor(TN_MM_INP_NORM_B);
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                } break;
            case PROJECTOR_TYPE_PIXTRAL:
                {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    // [IMG_BREAK] token embedding
                    model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
                    // for mistral small 3.1
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false);
                    model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
                } break;
            case PROJECTOR_TYPE_LIGHTONOCR:
                {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false);
                    model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
                } break;
            case PROJECTOR_TYPE_ULTRAVOX:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
                    model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight"));
                } break;
            case PROJECTOR_TYPE_QWEN2A:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight"));
                    model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias"));
                } break;
            case PROJECTOR_TYPE_VOXTRAL:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                } break;
            case PROJECTOR_TYPE_INTERNVL:
                {
                    model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
                    model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
                    model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
                } break;
            case PROJECTOR_TYPE_GLMA:
                {
                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "bias"));
                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                    model.mm_2_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "bias"));
                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
                    model.mm_norm_pre_b = get_tensor(string_format(TN_MM_NORM_PRE, "bias"));
                    model.mm_boi = get_tensor(string_format(TN_TOK_BOI, "weight"));
                    model.mm_eoi = get_tensor(string_format(TN_TOK_EOI, "weight"));
                } break;
            case PROJECTOR_TYPE_LLAMA4:
                {
                    model.mm_model_proj = get_tensor(TN_MM_PROJECTOR);
                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                } break;
            case PROJECTOR_TYPE_COGVLM:
                {
                    model.mm_model_proj = get_tensor(TN_MM_PROJECTOR);
                    model.mm_post_fc_norm_w = get_tensor(string_format(TN_MM_POST_FC_NORM, "weight"));
                    model.mm_post_fc_norm_b = get_tensor(string_format(TN_MM_POST_FC_NORM, "bias"));
                    model.mm_h_to_4h_w = get_tensor(string_format(TN_MM_H_TO_4H, "weight"));
                    model.mm_gate_w = get_tensor(string_format(TN_MM_GATE, "weight"));
                    model.mm_4h_to_h_w = get_tensor(string_format(TN_MM_4H_TO_H, "weight"));
                    model.mm_boi = get_tensor(TN_TOK_BOI);
                    model.mm_eoi = get_tensor(TN_TOK_EOI);
                } break;
            case PROJECTOR_TYPE_JANUS_PRO:
                {
                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
                } break;
            default:
                GGML_ASSERT(false && "unknown projector type");
        }

        // load data
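        // stream each tensor's bytes from the file at its recorded offset, either
        // directly into host memory or through a staging buffer for device backends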
        {
            std::vector<uint8_t> read_buf;
            auto fin = std::ifstream(fname, std::ios::binary);
            if (!fin) {
                throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str()));
            }

            // alloc memory and offload data
            ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
            ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
            ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
            for (auto & t : tensors_to_load) {
                ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
                const size_t offset = tensor_offset[t->name];
                fin.seekg(offset, std::ios::beg);
                if (!fin) {
                    throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
                }
                size_t num_bytes = ggml_nbytes(cur);
                if (ggml_backend_buft_is_host(buft)) {
                    // for the CPU and Metal backend, we can read directly into the tensor
                    fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
                } else {
                    // read into a temporary buffer first, then copy to device memory
                    read_buf.resize(num_bytes);
                    fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
                    ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
                }
            }
            fin.close();

            LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
        }
    }

    struct support_info_op {
        ggml_tensor * op;

        // true if the op runs on the accelerated ctx_clip.backend
        bool is_accel = true;
    };

    struct support_info_graph {
        // whether the clip_ctx.backend supports flash attention
        bool fattn = true;
        ggml_tensor * fattn_op = nullptr; // for debugging

        std::vector<support_info_op> ops;
    };

    static void warmup(clip_ctx & ctx_clip) {
        // create a fake batch
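        // warming up runs the allocation path end-to-end: build the graph for this
        // representative input, reserve compute buffers, and probe backend op support
        // (including flash attention) before any real input is processed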
        const auto & hparams = ctx_clip.model.hparams;
        clip_image_f32_batch batch;
        clip_image_f32_ptr img(clip_image_f32_init());
        if (ctx_clip.model.modality == CLIP_MODALITY_VISION) {
            img->nx = hparams.warmup_image_size;
            img->ny = hparams.warmup_image_size;
            LOG_INF("%s: warmup with image size = %d x %d\n", __func__, img->nx, img->ny);
        } else {
            img->nx = hparams.warmup_audio_size;
            img->ny = hparams.n_mel_bins;
            LOG_INF("%s: warmup with audio size = %d\n", __func__, img->nx);
        }
        batch.entries.push_back(std::move(img));

        warmup(ctx_clip, batch);
    }

    static void warmup(clip_ctx & ctx_clip, const clip_image_f32_batch & batch) {
        support_info_graph info;
        if (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_AUTO) {
            // try to enable flash attention to see if it's supported
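            // if the probe reports no support, log the offending op and re-allocate
            // with flash attention disabled, so AUTO never leaves a broken context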
            ctx_clip.flash_attn_type = CLIP_FLASH_ATTN_TYPE_ENABLED;
            info = alloc_compute_meta(ctx_clip, batch);
            if (!info.fattn && info.fattn_op) {
                auto op = info.fattn_op;
                LOG_WRN("%s: *****************************************************************\n", __func__);
                LOG_WRN("%s: WARNING: flash attention not supported by %s, memory usage will increase\n", __func__, ggml_backend_name(ctx_clip.backend));
                LOG_WRN("%s: op params: \n", __func__);
                static auto print_shape = [](const char * fn, const char * name, ggml_tensor * t) {
                    // note: ne is int64_t and nb is size_t, so cast for portable printf formats
                    LOG_WRN("%s: %s: type = %s, ne = [%lld %lld %lld %lld], nb = [%zu %zu %zu %zu]\n", fn,
                            name, ggml_type_name(t->type),
                            (long long) t->ne[0], (long long) t->ne[1], (long long) t->ne[2], (long long) t->ne[3],
                            t->nb[0], t->nb[1], t->nb[2], t->nb[3]);
                };
                print_shape(__func__, " dst", op);
                print_shape(__func__, "src0", op->src[0]);
                print_shape(__func__, "src1", op->src[1]);
                print_shape(__func__, "src2", op->src[2]);
                LOG_WRN("%s: please report this on github as an issue\n", __func__);
                LOG_WRN("%s: *****************************************************************\n", __func__);
                ctx_clip.flash_attn_type = CLIP_FLASH_ATTN_TYPE_DISABLED;
                alloc_compute_meta(ctx_clip, batch);
            }
        } else {
            info = alloc_compute_meta(ctx_clip, batch);
            if (!info.fattn && ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) {
                LOG_WRN("%s: flash attention is not supported by the current backend; falling back to CPU (performance will be degraded)\n", __func__);
            }
        }
        ctx_clip.is_allocated = true; // mark buffers as allocated

        LOG_INF("%s: flash attention is %s\n", __func__,
            (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) ? "enabled" : "disabled");

        // print ops that are not supported by the GPU backend (if there is one)
        if (ctx_clip.backend && ctx_clip.backend != ctx_clip.backend_cpu) {
            std::vector<support_info_op> unsupported_ops;
            for (const auto & op : info.ops) {
                if (!op.is_accel) {
                    unsupported_ops.push_back(op);
                }
            }
            if (!unsupported_ops.empty()) {
                LOG_WRN("%s: *****************************************************************\n", __func__);
                LOG_WRN("%s: WARNING: the CLIP graph uses operators that are unsupported by the backend\n", __func__);
                LOG_WRN("%s: performance will be suboptimal\n", __func__);
                LOG_WRN("%s: list of unsupported ops (backend=%s):\n", __func__, ggml_backend_name(ctx_clip.backend));
                for (const auto & op : unsupported_ops) {
                    LOG_WRN("%s: %16s: type = %s, ne = [%lld %lld %lld %lld]\n", __func__,
                        ggml_op_name(op.op->op),
                        ggml_type_name(op.op->type),
                        (long long) op.op->ne[0], (long long) op.op->ne[1], (long long) op.op->ne[2], (long long) op.op->ne[3]);
                }
                LOG_WRN("%s: flash attention is %s\n", __func__,
                    (ctx_clip.flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) ? "enabled" : "disabled");
                LOG_WRN("%s: please report this on github as an issue\n", __func__);
                LOG_WRN("%s: ref: https://github.com/ggml-org/llama.cpp/pull/16837#issuecomment-3461676118\n", __func__);
                LOG_WRN("%s: *****************************************************************\n", __func__);
            }
        }
    }
    static support_info_graph alloc_compute_meta(clip_ctx & ctx_clip, const clip_image_f32_batch & batch) {
        ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());
        ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
        ggml_backend_sched_reserve(ctx_clip.sched.get(), gf);

        for (size_t i = 0; i < ctx_clip.backend_ptrs.size(); ++i) {
            ggml_backend_t backend = ctx_clip.backend_ptrs[i];
            ggml_backend_buffer_type_t buft = ctx_clip.backend_buft[i];
            size_t size = ggml_backend_sched_get_buffer_size(ctx_clip.sched.get(), backend);
            if (size > 1) {
                LOG_INF("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                        ggml_backend_buft_name(buft),
                        size / 1024.0 / 1024.0);
            }
        }

        const int n_splits = ggml_backend_sched_get_n_splits(ctx_clip.sched.get());
        const int n_nodes = ggml_graph_n_nodes(gf);
        LOG_INF("%s: graph splits = %d, nodes = %d\n", __func__, n_splits, n_nodes);

        support_info_graph res {
            /*.fattn    = */ true,
            /*.fattn_op = */ nullptr,
            /*.ops      = */ {},
        };

        // check op support
        for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
            ggml_tensor * node = ggml_graph_node(gf, i);
            res.ops.push_back({node, true});
            if (!ggml_backend_supports_op(ctx_clip.backend, node)) {
                res.ops.back().is_accel = false;
                if (node->op == GGML_OP_FLASH_ATTN_EXT) {
                    res.fattn = false;
                    res.fattn_op = node;
                }
            }
        }
        return res;
    }
    void get_bool(const std::string & key, bool & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_bool(ctx_gguf.get(), i);
    }

    void get_i32(const std::string & key, int & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_i32(ctx_gguf.get(), i);
    }

    void get_u32(const std::string & key, int & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_u32(ctx_gguf.get(), i);
    }

    void get_f32(const std::string & key, float & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = gguf_get_val_f32(ctx_gguf.get(), i);
    }

    void get_string(const std::string & key, std::string & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        output = std::string(gguf_get_val_str(ctx_gguf.get(), i));
    }

    void get_arr_int(const std::string & key, std::vector<int> & output, bool required = true) const {
        const int i = gguf_find_key(ctx_gguf.get(), key.c_str());
        if (i < 0) {
            if (required) {
                throw std::runtime_error("Key not found: " + key);
            }
            return;
        }
        int n = gguf_get_arr_n(ctx_gguf.get(), i);
        output.resize(n);
        const int32_t * values = (const int32_t *)gguf_get_arr_data(ctx_gguf.get(), i);
        for (int j = 0; j < n; ++j) {
            output[j] = values[j];
        }
    }
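
    // build the list of "pinpoint" resolution candidates used for slicing; e.g.
    // with max_patches_per_side = 2 and image_size = 336, this yields
    // 336x672, 672x336 and 672x672 (336x336 is skipped: the overview covers it)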
    static void set_llava_uhd_res_candidates(clip_model & model, const int max_patches_per_side) {
        auto & hparams = model.hparams;
        for (int x = 1; x <= max_patches_per_side; x++) {
            for (int y = 1; y <= max_patches_per_side; y++) {
                if (x == 1 && y == 1) {
                    continue; // skip the first point
                }
                hparams.image_res_candidates.push_back(clip_image_size{
                    x*hparams.image_size,
                    y*hparams.image_size,
                });
            }
        }
    }
};

struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) {
    clip_ctx * ctx_vision = nullptr;
    clip_ctx * ctx_audio = nullptr;

    try {
        clip_model_loader loader(fname);

        if (loader.has_vision) {
            ctx_vision = new clip_ctx(ctx_params);
            loader.load_hparams(ctx_vision->model, CLIP_MODALITY_VISION);
            loader.load_tensors(*ctx_vision);
            if (ctx_params.warmup) {
                loader.warmup(*ctx_vision);
            }
        }

        if (loader.has_audio) {
            ctx_audio = new clip_ctx(ctx_params);
            loader.load_hparams(ctx_audio->model, CLIP_MODALITY_AUDIO);
            loader.load_tensors(*ctx_audio);
            if (ctx_params.warmup) {
                loader.warmup(*ctx_audio);
            }
        }
    } catch (const std::exception & e) {
        LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
        delete ctx_vision;
        delete ctx_audio;
        return {nullptr, nullptr};
    }

    return {ctx_vision, ctx_audio};
}

struct clip_image_size * clip_image_size_init() {
    struct clip_image_size * load_image_size = new struct clip_image_size();
    load_image_size->width = 448;
    load_image_size->height = 448;
    return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
    return new clip_image_u8();
}

struct clip_image_f32 * clip_image_f32_init() {
    return new clip_image_f32();
}

struct clip_image_f32_batch * clip_image_f32_batch_init() {
    return new clip_image_f32_batch();
}

unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny) {
    if (nx) *nx = img->nx;
    if (ny) *ny = img->ny;
    return img->buf.data();
}

void clip_image_size_free(struct clip_image_size * load_image_size) {
    if (load_image_size == nullptr) {
        return;
    }
    delete load_image_size;
}

void clip_image_u8_free(struct clip_image_u8 * img) { delete img; }
void clip_image_f32_free(struct clip_image_f32 * img) { delete img; }
void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { delete batch; }
void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { delete batch; }

size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch) {
    return batch->entries.size();
}

size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->nx;
}

size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return 0;
    }
    return batch->entries[idx]->ny;
}

clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx) {
    if (idx < 0 || idx >= (int)batch->entries.size()) {
        LOG_ERR("%s: invalid index %d\n", __func__, idx);
        return nullptr;
    }
    return batch->entries[idx].get();
}

void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, clip_image_u8 * img) {
    img->nx = nx;
    img->ny = ny;
    img->buf.resize(3 * nx * ny);
    memcpy(img->buf.data(), rgb_pixels, img->buf.size());
}

// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
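// i.e. per channel c: dst = (src / 255.0f - mean[c]) / std[c]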
static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(src.buf.size());

    // TODO @ngxson : seems like this could be done more efficiently on cgraph
    for (size_t i = 0; i < src.buf.size(); ++i) {
        int c = i % 3; // rgb
        dst.buf[i] = (static_cast<float>(src.buf[i]) / 255.0f - mean[c]) / std[c];
    }
}

// set of tools to manipulate images
// in the future, we can have HW acceleration by allowing this struct to access 3rd party libs like imagick or opencv
struct img_tool {
    enum resize_algo {
        RESIZE_ALGO_BILINEAR,
        RESIZE_ALGO_BICUBIC,
        // RESIZE_ALGO_LANCZOS, // TODO
    };

    static void resize(
            const clip_image_u8 & src,
            clip_image_u8 & dst,
            const clip_image_size & target_resolution,
            resize_algo algo,
            bool add_padding = true, // TODO: define the behavior for add_padding = false
            std::array<uint8_t, 3> pad_color = {0, 0, 0}) {
        dst.nx = target_resolution.width;
        dst.ny = target_resolution.height;
        dst.buf.resize(3 * dst.nx * dst.ny);

        if (dst.nx == src.nx && dst.ny == src.ny) {
            // no resize needed, simple copy
            dst.buf = src.buf;
            return;
        }

        if (!add_padding) {
            // direct resize
            switch (algo) {
                case RESIZE_ALGO_BILINEAR:
                    resize_bilinear(src, dst, target_resolution.width, target_resolution.height);
                    break;
                case RESIZE_ALGO_BICUBIC:
                    resize_bicubic(src, dst, target_resolution.width, target_resolution.height);
                    break;
                default:
                    throw std::runtime_error("Unsupported resize algorithm");
            }
        } else {
            // resize with padding
            clip_image_u8 resized_image;
            float scale_w = static_cast<float>(target_resolution.width) / src.nx;
            float scale_h = static_cast<float>(target_resolution.height) / src.ny;
            float scale = std::min(scale_w, scale_h);
            int new_width = std::min(static_cast<int>(std::ceil(src.nx * scale)), target_resolution.width);
            int new_height = std::min(static_cast<int>(std::ceil(src.ny * scale)), target_resolution.height);
            switch (algo) {
                case RESIZE_ALGO_BILINEAR:
                    resize_bilinear(src, resized_image, new_width, new_height);
                    break;
                case RESIZE_ALGO_BICUBIC:
                    resize_bicubic(src, resized_image, new_width, new_height);
                    break;
                default:
                    throw std::runtime_error("Unsupported resize algorithm");
            }

            // fill dst with pad_color
            fill(dst, pad_color);

            int offset_x = (target_resolution.width - new_width) / 2;
            int offset_y = (target_resolution.height - new_height) / 2;
            composite(dst, resized_image, offset_x, offset_y);
        }
    }

    static void crop(const clip_image_u8 & image, clip_image_u8 & dst, int x, int y, int w, int h) {
        dst.nx = w;
        dst.ny = h;
        dst.buf.resize(3 * w * h);

        for (int i = 0; i < h; ++i) {
            for (int j = 0; j < w; ++j) {
                int src_idx = 3 * ((y + i)*image.nx + (x + j));
                int dst_idx = 3 * (i*w + j);
                dst.buf[dst_idx] = image.buf[src_idx];
                dst.buf[dst_idx + 1] = image.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = image.buf[src_idx + 2];
            }
        }
    }

    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size is aligned up to the nearest multiple of align_size
    // if H or W is larger than longest_edge, the image is scaled down so the longer side equals longest_edge
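    // e.g. a 1000x700 input with align_size = 28 and longest_edge = 784 is scaled
    // by 784/1000 = 0.784 to 784x548.8, then aligned up to 784x560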
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int longest_edge) {
        GGML_ASSERT(align_size > 0);
        if (inp_size.width <= 0 || inp_size.height <= 0 || longest_edge <= 0) {
            return {0, 0};
        }

        float scale = std::min(static_cast<float>(longest_edge) / inp_size.width,
                               static_cast<float>(longest_edge) / inp_size.height);

        float target_width_f = static_cast<float>(inp_size.width) * scale;
        float target_height_f = static_cast<float>(inp_size.height) * scale;

        auto ceil_by_factor = [f = align_size](float x) { return static_cast<int>(std::ceil(x / static_cast<float>(f))) * f; };
        int aligned_width = ceil_by_factor(target_width_f);
        int aligned_height = ceil_by_factor(target_height_f);

        return {aligned_width, aligned_height};
    }

    // calculate the size of the **resized** image, while preserving the aspect ratio
    // the calculated size will have min_pixels <= W*H <= max_pixels
    // this is referred to as "smart_resize" in the transformers code
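    // e.g. a 1000x700 input with align_size = 28 and max_pixels = 409600 (640*640):
    // rounding gives 1008x700 = 705600 > max_pixels, so beta = sqrt(700*1000/409600)
    // ~= 1.307 and the size is floored to 756x532 = 402192 pixels, within the limit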
    static clip_image_size calc_size_preserved_ratio(const clip_image_size & inp_size, const int align_size, const int min_pixels, const int max_pixels) {
        GGML_ASSERT(align_size > 0);
        const int width = inp_size.width;
        const int height = inp_size.height;

        auto round_by_factor = [f = align_size](float x) { return static_cast<int>(std::round(x / static_cast<float>(f))) * f; };
        auto ceil_by_factor = [f = align_size](float x) { return static_cast<int>(std::ceil(x / static_cast<float>(f))) * f; };
        auto floor_by_factor = [f = align_size](float x) { return static_cast<int>(std::floor(x / static_cast<float>(f))) * f; };

        // round to a multiple of align_size first
        int h_bar = std::max(align_size, round_by_factor(height));
        int w_bar = std::max(align_size, round_by_factor(width));

        if (h_bar * w_bar > max_pixels) {
            const auto beta = std::sqrt(static_cast<float>(height * width) / max_pixels);
            h_bar = std::max(align_size, floor_by_factor(height / beta));
            w_bar = std::max(align_size, floor_by_factor(width / beta));
        } else if (h_bar * w_bar < min_pixels) {
            const auto beta = std::sqrt(static_cast<float>(min_pixels) / (height * width));
            h_bar = ceil_by_factor(height * beta);
            w_bar = ceil_by_factor(width * beta);
        }

        return {w_bar, h_bar};
    }
    // draw src image into dst image at offset (offset_x, offset_y)
    static void composite(clip_image_u8 & dst, const clip_image_u8 & src, int offset_x, int offset_y) {
        for (int y = 0; y < src.ny; ++y) {
            for (int x = 0; x < src.nx; ++x) {
                int dx = x + offset_x;
                int dy = y + offset_y;
                // skip pixels that would be out of bounds in the destination
                if (dx < 0 || dy < 0 || dx >= dst.nx || dy >= dst.ny) {
                    continue;
                }
                size_t dst_idx = 3 * (static_cast<size_t>(dy) * dst.nx + static_cast<size_t>(dx));
                size_t src_idx = 3 * (static_cast<size_t>(y) * src.nx + static_cast<size_t>(x));
                dst.buf[dst_idx + 0] = src.buf[src_idx + 0];
                dst.buf[dst_idx + 1] = src.buf[src_idx + 1];
                dst.buf[dst_idx + 2] = src.buf[src_idx + 2];
            }
        }
    }

    // fill the image with a solid color
    static void fill(clip_image_u8 & img, const std::array<uint8_t, 3> & color) {
        for (size_t i = 0; i < img.buf.size(); i += 3) {
            img.buf[i] = color[0];
            img.buf[i + 1] = color[1];
            img.buf[i + 2] = color[2];
        }
    }

private:
    // Bilinear resize function
    static void resize_bilinear(const clip_image_u8 & src, clip_image_u8 & dst, int target_width, int target_height) {
        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float x_ratio = static_cast<float>(src.nx - 1) / target_width;
        float y_ratio = static_cast<float>(src.ny - 1) / target_height;

        for (int y = 0; y < target_height; y++) {
            for (int x = 0; x < target_width; x++) {
                float px = x_ratio * x;
                float py = y_ratio * y;
                int x_floor = static_cast<int>(px);
                int y_floor = static_cast<int>(py);
                float x_lerp = px - x_floor;
                float y_lerp = py - y_floor;

                for (int c = 0; c < 3; c++) {
                    float top = lerp(
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    float bottom = lerp(
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
                        static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
                        x_lerp
                    );
                    dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
                }
            }
        }
    }

    // Bicubic resize function
    // part of the image will be cropped if the aspect ratio is different
    static bool resize_bicubic(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
        const int nx = img.nx;
        const int ny = img.ny;

        dst.nx = target_width;
        dst.ny = target_height;
        dst.buf.resize(3 * target_width * target_height);

        float Cc;
        float C[5] = {};
        float d0, d2, d3, a0, a1, a2, a3;
        int i, j, k, jj;
        int x, y;
        float dx, dy;
        float tx, ty;

        tx = (float)nx / (float)target_width;
        ty = (float)ny / (float)target_height;

        // Bicubic interpolation; adapted from ViT.cpp, inspired from:
        // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
        // -> https://en.wikipedia.org/wiki/Bicubic_interpolation
        for (i = 0; i < target_height; i++) {
            for (j = 0; j < target_width; j++) {
                x = (int)(tx * j);
                y = (int)(ty * i);
                dx = tx * j - x;
                dy = ty * i - y;

                for (k = 0; k < 3; k++) {
                    for (jj = 0; jj <= 3; jj++) {
                        d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                        a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];

                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
                        C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;

                        d0 = C[0] - C[1];
                        d2 = C[2] - C[1];
                        d3 = C[3] - C[1];
                        a0 = C[1];
                        a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                        a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
                        a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
                        Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;

                        const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
                        dst.buf[(i * target_width + j) * 3 + k] = Cc2;
                    }
                }
            }
        }

        return true;
    }

    static inline int clip(int x, int lower, int upper) {
        return std::max(lower, std::min(x, upper));
    }

    // Linear interpolation between two points
    static inline float lerp(float s, float e, float t) {
        return s + (e - s) * t;
    }
};
/**
 * implementation of LLaVA-UHD:
 *  - https://arxiv.org/pdf/2403.11703
 *  - https://github.com/thunlp/LLaVA-UHD
 *  - https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
 *
 * overview:
 *   - an image always has a single overview (the downscaled image)
 *   - an image can have zero or more slices, depending on the image size
 *   - each slice can then be considered as a separate image
 *
 * for example:
 *
 * [overview] --> [slice 1] --> [slice 2]
 *                    |             |
 *                    +--> [slice 3] --> [slice 4]
 */
struct llava_uhd {
    struct slice_coordinates {
        int x;
        int y;
        clip_image_size size;
    };

    struct slice_instructions {
        clip_image_size overview_size; // size of downscaled image
        clip_image_size refined_size;  // size of image right before slicing (must be multiple of slice size)
        clip_image_size grid_size;     // grid_size.width * grid_size.height = number of slices
        std::vector<slice_coordinates> slices;

        img_tool::resize_algo interpolation_overview = img_tool::RESIZE_ALGO_BILINEAR;
        bool padding_overview = false; // if true, the overview image will be padded to the target size
        std::array<uint8_t, 3> pad_color_overview = {0, 0, 0};

        img_tool::resize_algo interpolation_refined = img_tool::RESIZE_ALGO_BICUBIC;
        bool padding_refined = false;  // if true, the refined image will be padded to the grid size (e.g. llava-1.6)
        std::array<uint8_t, 3> pad_color_refined = {0, 0, 0};
    };
    static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
        slice_instructions res;
        const int patch_size = clip_get_patch_size(ctx);
        const int slice_size = clip_get_image_size(ctx);
        const int original_width = original_size.width;
        const int original_height = original_size.height;

        const bool has_slices = original_size.width > slice_size || original_size.height > slice_size;
        const bool has_pinpoints = !ctx->model.hparams.image_res_candidates.empty();

        if (!has_slices) {
            // skip slicing logic
            res.overview_size = clip_image_size{slice_size, slice_size};
            res.refined_size = clip_image_size{0, 0};
            res.grid_size = clip_image_size{0, 0};
            return res;
        }

        if (has_pinpoints) {
            // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
            auto refine_size = llava_uhd::select_best_resolution(
                original_size,
                ctx->model.hparams.image_res_candidates);
            res.overview_size = clip_image_size{slice_size, slice_size};
            res.refined_size = refine_size;
            res.grid_size = clip_image_size{0, 0};
            res.padding_refined = true;
            res.interpolation_refined = img_tool::RESIZE_ALGO_BILINEAR; // preserve old behavior when padding

            LOG_DBG("%s: using pinpoints for slicing\n", __func__);
            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n",
                    __func__, original_width, original_height,
                    res.overview_size.width, res.overview_size.height,
                    res.refined_size.width, res.refined_size.height);

            for (int y = 0; y < refine_size.height; y += slice_size) {
                for (int x = 0; x < refine_size.width; x += slice_size) {
                    slice_coordinates slice;
                    slice.x = x;
                    slice.y = y;
                    slice.size.width = std::min(slice_size, refine_size.width - x);
                    slice.size.height = std::min(slice_size, refine_size.height - y);
                    res.slices.push_back(slice);
                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
                            __func__, (int)res.slices.size() - 1,
                            slice.x, slice.y, slice.size.width, slice.size.height);
                }
            }
            res.grid_size.height = refine_size.height / slice_size;
            res.grid_size.width = refine_size.width / slice_size;
            LOG_DBG("%s: grid size: %d x %d\n", __func__, res.grid_size.width, res.grid_size.height);
            return res;
        }

        // no pinpoints, dynamically calculate the grid size (e.g. minicpmv)
        auto best_size = get_best_resize(original_size, slice_size, patch_size, !has_slices);
        res.overview_size = best_size;

        {
            const int max_slice_nums = 9; // TODO: this is only used by minicpmv, maybe remove it
            const float log_ratio = log((float)original_width / original_height);
            const float ratio = (float)original_width * original_height / (slice_size * slice_size);
            const int multiple = fmin(ceil(ratio), max_slice_nums);

            auto best_grid = get_best_grid(max_slice_nums, multiple, log_ratio);
            auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
            res.grid_size = best_grid;
            res.refined_size = refine_size;
            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d, grid size: %d x %d\n",
                    __func__, original_width, original_height,
                    res.overview_size.width, res.overview_size.height,
                    res.refined_size.width, res.refined_size.height,
                    res.grid_size.width, res.grid_size.height);

            int width = refine_size.width;
            int height = refine_size.height;
            int grid_x = int(width / best_grid.width);
            int grid_y = int(height / best_grid.height);
            for (int patches_y = 0, ic = 0;
                 patches_y < refine_size.height && ic < best_grid.height;
                 patches_y += grid_y, ic += 1) {
                for (int patches_x = 0, jc = 0;
                     patches_x < refine_size.width && jc < best_grid.width;
                     patches_x += grid_x, jc += 1) {
                    slice_coordinates slice;
                    slice.x = patches_x;
                    slice.y = patches_y;
                    slice.size.width = grid_x;
                    slice.size.height = grid_y;
                    res.slices.push_back(slice);
                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
                            __func__, (int)res.slices.size() - 1,
                            slice.x, slice.y, slice.size.width, slice.size.height);
                }
            }
        }

        return res;
    }
  2099. static std::vector<clip_image_u8_ptr> slice_image(const clip_image_u8 * img, const slice_instructions & inst) {
  2100. std::vector<clip_image_u8_ptr> output;
  2101. // resize to overview size
  2102. clip_image_u8_ptr resized_img(clip_image_u8_init());
  2103. img_tool::resize(*img, *resized_img, inst.overview_size, inst.interpolation_overview,
  2104. inst.padding_overview, inst.pad_color_overview);
  2105. output.push_back(std::move(resized_img));
  2106. if (inst.slices.empty()) {
  2107. // no slices, just return the resized image
  2108. return output;
  2109. }
  2110. // resize to refined size
  2111. clip_image_u8_ptr refined_img(clip_image_u8_init());
  2112. img_tool::resize(*img, *refined_img, inst.refined_size, inst.interpolation_refined,
  2113. inst.padding_refined, inst.pad_color_refined);
  2114. // create slices
  2115. for (const auto & slice : inst.slices) {
  2116. int x = slice.x;
  2117. int y = slice.y;
  2118. int w = slice.size.width;
  2119. int h = slice.size.height;
  2120. clip_image_u8_ptr img_slice(clip_image_u8_init());
  2121. img_tool::crop(*refined_img, *img_slice, x, y, w, h);
  2122. output.push_back(std::move(img_slice));
  2123. }
  2124. return output;
  2125. }
  2126. private:
  2127. static clip_image_size get_best_resize(const clip_image_size & original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
  2128. int width = original_size.width;
  2129. int height = original_size.height;
  2130. if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
  2131. float r = static_cast<float>(width) / height;
  2132. height = static_cast<int>(scale_resolution / std::sqrt(r));
  2133. width = static_cast<int>(height * r);
  2134. }
  2135. clip_image_size res;
  2136. res.width = ensure_divide(width, patch_size);
  2137. res.height = ensure_divide(height, patch_size);
  2138. return res;
  2139. }

    static clip_image_size resize_maintain_aspect_ratio(const clip_image_size & orig, const clip_image_size & target_max) {
        float scale_width  = static_cast<float>(target_max.width)  / orig.width;
        float scale_height = static_cast<float>(target_max.height) / orig.height;
        float scale = std::min(scale_width, scale_height);
        return clip_image_size{
            static_cast<int>(orig.width  * scale),
            static_cast<int>(orig.height * scale),
        };
    }

    /**
     * Selects the best resolution from a list of possible resolutions based on the original size.
     *
     * For example, when given a list of resolutions:
     *  - 100x100
     *  - 200x100
     *  - 100x200
     *  - 200x200
     *
     * And an input image of size 111x200, then 100x200 is the best fit (least wasted resolution).
     *
     * @param original_size The original size of the image
     * @param possible_resolutions A list of possible resolutions
     * @return The best fit resolution
     */
    static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
        clip_image_size best_fit;
        int min_wasted_area = std::numeric_limits<int>::max();
        int max_effective_resolution = 0;

        for (const clip_image_size & candidate : possible_resolutions) {
            auto target_size = resize_maintain_aspect_ratio(original_size, candidate);
            int effective_resolution = std::min(
                target_size.width * target_size.height,
                original_size.width * original_size.height);
            int wasted_area = (candidate.width * candidate.height) - effective_resolution;

            if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_area < min_wasted_area)) {
                max_effective_resolution = effective_resolution;
                min_wasted_area = wasted_area;
                best_fit = candidate;
            }

            LOG_DBG("%s: candidate: %d x %d, target: %d x %d, wasted: %d, effective: %d\n", __func__, candidate.width, candidate.height, target_size.width, target_size.height, wasted_area, effective_resolution);
        }

        return best_fit;
    }

    static int ensure_divide(int length, int patch_size) {
        return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
    }
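    // ensure_divide rounds to the nearest multiple of patch_size, but never
    // below one patch: ensure_divide(632, 14) == 630, ensure_divide(5, 14) == 14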

    static clip_image_size get_refine_size(const clip_image_size & original_size, const clip_image_size & grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
        int width  = original_size.width;
        int height = original_size.height;
        int grid_x = grid.width;
        int grid_y = grid.height;

        int refine_width  = ensure_divide(width,  grid_x);
        int refine_height = ensure_divide(height, grid_y);

        clip_image_size grid_size;
        grid_size.width  = refine_width  / grid_x;
        grid_size.height = refine_height / grid_y;

        auto best_grid_size  = get_best_resize(grid_size, scale_resolution, patch_size, allow_upscale);
        int best_grid_width  = best_grid_size.width;
        int best_grid_height = best_grid_size.height;

        clip_image_size refine_size;
        refine_size.width  = best_grid_width  * grid_x;
        refine_size.height = best_grid_height * grid_y;
        return refine_size;
    }
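    // note: each grid cell is snapped to a multiple of patch_size and the
    // refined canvas is cell_size * grid, so the slices tile it exactly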

    static clip_image_size get_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
        std::vector<int> candidate_split_grids_nums;
        for (int i : {multiple - 1, multiple, multiple + 1}) {
            if (i == 1 || i > max_slice_nums) {
                continue;
            }
            candidate_split_grids_nums.push_back(i);
        }

        std::vector<clip_image_size> candidate_grids;
        for (int split_grids_nums : candidate_split_grids_nums) {
            int m = 1;
            while (m <= split_grids_nums) {
                if (split_grids_nums % m == 0) {
                    candidate_grids.push_back(clip_image_size{m, split_grids_nums / m});
                }
                ++m;
            }
        }

        clip_image_size best_grid{1, 1};
        float min_error = std::numeric_limits<float>::infinity();
        for (const auto & grid : candidate_grids) {
            float error = std::abs(log_ratio - std::log(1.0 * grid.width / grid.height));
            if (error < min_error) {
                best_grid = grid;
                min_error = error;
            }
        }
        return best_grid;
    }
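    // example: for a 16:9 image with multiple = 4 and max_slice_nums = 9, the
    // candidate grids are {1x3, 3x1, 1x4, 2x2, 4x1, 1x5, 5x1}; 3x1 wins because
    // log(3/1) ~= 1.10 is closest to log(16/9) ~= 0.58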
};

// returns the normalized float tensor for llava-1.5; for llava-1.6 ("spatial_unpad"
// with "anyres" processing) it returns the normalized image patch tensors as a vector
// res_imgs memory is allocated here; previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
    clip_image_size original_size{img->nx, img->ny};

    auto & params = ctx->model.hparams;

    switch (ctx->proj_type()) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                for (size_t i = 0; i < imgs.size(); ++i) {
                    // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = inst.grid_size.width;
                res_imgs->grid_y = inst.grid_size.height;
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_QWEN3VL:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                clip_image_u8 resized;
                const clip_image_size new_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * 2,
                    params.image_min_pixels,
                    params.image_max_pixels);
                img_tool::resize(*img, resized, new_size, img_tool::RESIZE_ALGO_BILINEAR, false);
                // clip_image_save_to_bmp(resized, "preproc.bmp");

                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_IDEFICS3:
            {
                // The refined size has two steps:
                // 1. Resize w/ aspect-ratio preserving such that the longer side is
                //    the preprocessor longest size
                // 2. Resize w/out preserving aspect ratio such that both sides are
                //    multiples of image_size (always rounding up)
                //
                // CITE: https://github.com/huggingface/transformers/blob/main/src/transformers/models/idefics3/image_processing_idefics3.py#L737
                const clip_image_size refined_size = img_tool::calc_size_preserved_ratio(
                    original_size, params.image_size, params.image_longest_edge);
                // LOG_INF("%s: original size: %d x %d, refined size: %d x %d\n",
                //         __func__, original_size.width, original_size.height,
                //         refined_size.width, refined_size.height);

                llava_uhd::slice_instructions instructions;
                instructions.overview_size = clip_image_size{params.image_size, params.image_size};
                instructions.refined_size  = refined_size;
                instructions.grid_size     = clip_image_size{
                    static_cast<int>(std::ceil(static_cast<float>(refined_size.width)  / params.image_size)),
                    static_cast<int>(std::ceil(static_cast<float>(refined_size.height) / params.image_size)),
                };

                for (int y = 0; y < refined_size.height; y += params.image_size) {
                    for (int x = 0; x < refined_size.width; x += params.image_size) {
                        // LOG_INF("%s: adding slice at x=%d, y=%d\n", __func__, x, y);
                        instructions.slices.push_back(llava_uhd::slice_coordinates{
                            /* x    */ x,
                            /* y    */ y,
                            /* size */ clip_image_size{
                                std::min(params.image_size, refined_size.width  - x),
                                std::min(params.image_size, refined_size.height - y)
                            }
                        });
                    }
                }

                auto imgs = llava_uhd::slice_image(img, instructions);

                // cast and normalize to f32
                for (size_t i = 0; i < imgs.size(); ++i) {
                    // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = instructions.grid_size.width;
                res_imgs->grid_y = instructions.grid_size.height;
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_INTERNVL: // TODO @ngxson : support dynamic resolution
            {
                clip_image_u8 resized_image;
                int sz = params.image_size;
                img_tool::resize(*img, resized_image, {sz, sz}, img_tool::RESIZE_ALGO_BILINEAR);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                // clip_image_save_to_bmp(resized_image, "resized.bmp");
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_JANUS_PRO:
            {
                // Janus Pro preprocessing: pad to square with gray(127), resize to 384x384
                const std::array<uint8_t, 3> pad_color = {127, 127, 127};
                clip_image_u8 resized_image;
                int sz = params.image_size;
                img_tool::resize(*img, resized_image, {sz, sz}, img_tool::RESIZE_ALGO_BILINEAR, true, pad_color);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                clip_image_u8 resized_image;
                // the original pixtral model doesn't have n_merge
                const int cur_merge = params.n_merge == 0 ? 1 : params.n_merge;
                const clip_image_size target_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * cur_merge,
                    params.image_min_pixels,
                    params.image_max_pixels);
                img_tool::resize(*img, resized_image, target_size, img_tool::RESIZE_ALGO_BILINEAR);
                clip_image_f32_ptr img_f32(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(img_f32));
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                GGML_ASSERT(!params.image_res_candidates.empty());
                auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                for (size_t i = 0; i < imgs.size(); ++i) {
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                }

                res_imgs->grid_x = inst.grid_size.width;
                res_imgs->grid_y = inst.grid_size.height;
            } break;
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            {
                GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
                const clip_image_size target_size = img_tool::calc_size_preserved_ratio(
                    original_size,
                    params.patch_size * params.n_merge,
                    params.image_min_pixels,
                    params.image_max_pixels);
                const std::array<uint8_t, 3> pad_color = {122, 116, 104};

                clip_image_u8 resized_img;
                const bool pad = (ctx->proj_type() != PROJECTOR_TYPE_LFM2);
                img_tool::resize(*img, resized_img, target_size, img_tool::RESIZE_ALGO_BILINEAR, pad, pad_color);

                clip_image_f32_ptr res(clip_image_f32_init());
                normalize_image_u8_to_f32(resized_img, *res, params.image_mean, params.image_std);
                res_imgs->entries.push_back(std::move(res));
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_COGVLM: // TODO @ngxson : is this correct for cogvlm?
            {
                // TODO @ngxson : refactor the code below to avoid duplicated logic
                clip_image_u8_ptr temp(clip_image_u8_init()); // we will keep the input image data here temporarily

                // the model config contains everything we need to decide how to preprocess;
                // here we automatically switch to the llava-1.6 path when resolution candidates are present
                if (params.image_res_candidates.empty()) { // pad_to_square
                    // for llava-1.5, we resize the image to a square, padding the shorter side with a background color
                    // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156
                    const int longer_side = std::max(img->nx, img->ny);
                    temp->nx = longer_side;
                    temp->ny = longer_side;
                    temp->buf.resize(3 * longer_side * longer_side);

                    // background color in RGB from LLaVA (this is the mean rgb color * 255)
                    const std::array<uint8_t, 3> pad_color = {122, 116, 104};

                    // resize the image to the target size
                    img_tool::resize(*img, *temp, clip_image_size{params.image_size, params.image_size}, img_tool::RESIZE_ALGO_BILINEAR, true, pad_color);
                    clip_image_f32_ptr res(clip_image_f32_init());
                    normalize_image_u8_to_f32(*temp, *res, params.image_mean, params.image_std);
                    res_imgs->entries.push_back(std::move(res));
                } else {
                    // "spatial_unpad" with "anyres" processing for llava-1.6
                    auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
                    std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);

                    for (size_t i = 0; i < imgs.size(); ++i) {
                        // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
                        clip_image_f32_ptr res(clip_image_f32_init());
                        normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
                        res_imgs->entries.push_back(std::move(res));
                    }
                }
            } break;
        default:
            LOG_ERR("%s: unsupported projector type %d\n", __func__, ctx->proj_type());
            return false;
    }

    return true;
}
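// A minimal usage sketch (illustrative only; `ctx` and `img` are assumed to be
// a loaded clip_ctx and a decoded clip_image_u8):
//
//     clip_image_f32_batch batch;
//     if (clip_image_preprocess(ctx, img, &batch)) {
//         // encode each pre-processed entry separately, since
//         // clip_image_batch_encode() only accepts a batch of one
//         for (auto & entry : batch.entries) {
//             std::vector<float> embd(clip_embd_nbytes_by_img(ctx, entry->nx, entry->ny) / sizeof(float));
//             bool ok = clip_image_encode(ctx, /*n_threads=*/4, entry.get(), embd.data());
//             // ...
//         }
//     }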

ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
    return ctx->model.image_newline;
}

void clip_free(clip_ctx * ctx) {
    if (ctx == nullptr) {
        return;
    }
    delete ctx;
}

// deprecated
size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
    const int32_t nx = ctx->model.hparams.image_size;
    const int32_t ny = ctx->model.hparams.image_size;
    return clip_embd_nbytes_by_img(ctx, nx, ny);
}

size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h) {
    clip_image_f32 img;
    img.nx = img_w;
    img.ny = img_h;
    return clip_n_output_tokens(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float);
}

int32_t clip_get_image_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.image_size;
}

int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.patch_size;
}

int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
    return ctx->model.hparams.n_embd;
}

const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
    return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
}

int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;
    const int n_total = clip_n_output_tokens(ctx, img);
    if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN3VL) {
        return img->nx / (params.patch_size * 2);
    }
    return n_total;
}

int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;
    if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN3VL) {
        return img->ny / (params.patch_size * 2);
    }
    return 1;
}

int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
    const auto & params = ctx->model.hparams;

    // for models with a fixed image size, the input image is already pre-processed and resized to a square
    int patch_size = params.patch_size;
    int n_patches  = (img->nx / patch_size) * (img->ny / patch_size);

    projector_type proj = ctx->proj_type();

    switch (proj) {
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_JANUS_PRO:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                n_patches /= 4;
                if (ctx->model.mm_boi) {
                    n_patches += 2; // for BOI and EOI token embeddings
                }
            } break;
        case PROJECTOR_TYPE_MINICPMV:
            {
                // use the actual config value if available, otherwise fall back to hardcoded values
                if (params.minicpmv_query_num > 0) {
                    n_patches = params.minicpmv_query_num;
                } else {
                    // fallback to hardcoded values for legacy models
                    if (params.minicpmv_version == 2) {
                        n_patches = 96;
                    } else if (params.minicpmv_version == 3) {
                        n_patches = 64;
                    } else if (params.minicpmv_version == 4) {
                        n_patches = 64;
                    } else if (params.minicpmv_version == 5) {
                        // MiniCPM-V 4.0
                        n_patches = 64;
                    } else if (params.minicpmv_version == 6) {
                        // MiniCPM-V 4.5
                        n_patches = 64;
                    } else {
                        GGML_ABORT("Unknown minicpmv version");
                    }
                }
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_QWEN3VL:
            {
                // dynamic size (two conv layers, so the effective patch size is doubled)
                int x_patch = img->nx / (params.patch_size * 2);
                int y_patch = img->ny / (params.patch_size * 2);
                n_patches = x_patch * y_patch;
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_LLAMA4:
            {
                // both X and Y are downscaled by the scale factor
                int scale_factor = ctx->model.hparams.n_merge;
                n_patches /= (scale_factor * scale_factor);
            } break;
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            {
                // dynamic size
                int out_patch_size = params.patch_size * ctx->model.hparams.n_merge;
                int x_patch = CLIP_ALIGN(img->nx, out_patch_size) / out_patch_size;
                int y_patch = CLIP_ALIGN(img->ny, out_patch_size) / out_patch_size;
                n_patches = x_patch * y_patch;
            } break;
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                // dynamic size
                int n_merge = ctx->model.hparams.n_merge;
                int n_patches_x = img->nx / patch_size / (n_merge > 0 ? n_merge : 1);
                int n_patches_y = img->ny / patch_size / (n_merge > 0 ? n_merge : 1);
                if (ctx->model.token_embd_img_break) {
                    n_patches = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
                } else {
                    n_patches = n_patches_y * n_patches_x;
                }
            } break;
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_QWEN2A:
            {
                n_patches = img->nx;

                const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
                if (ctx->model.audio_has_stack_frames()) {
                    GGML_ASSERT(proj_stack_factor > 0);
                    const int n_len = CLIP_ALIGN(n_patches, proj_stack_factor);
                    n_patches = n_len / proj_stack_factor;
                }

                // whisper halves the number of input tokens after the conv1d stage
                n_patches /= 2;

                if (ctx->model.audio_has_avgpool()) {
                    // divide by 2 because of nn.AvgPool1d(2, stride=2)
                    n_patches /= 2;
                }
            } break;
        case PROJECTOR_TYPE_GLMA:
            {
                n_patches = img->nx;
                // whisper halves the number of input tokens after the conv1d stage
                n_patches /= 2;
                // reshape by merge_factor
                n_patches /= ctx->model.hparams.proj_stack_factor;
                // for BOI and EOI token embeddings
                n_patches += 2;
            } break;
        case PROJECTOR_TYPE_COGVLM:
            {
                n_patches += 2; // for BOI and EOI token embeddings
            } break;
        default:
            GGML_ABORT("unsupported projector type");
    }

    return n_patches;
}
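// example: for PROJECTOR_TYPE_QWEN2VL with patch_size = 14, a 448x448 input
// yields (448/28) * (448/28) = 16 * 16 = 256 output tokens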

bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    clip_image_f32_batch imgs;
    clip_image_f32_ptr img_copy(clip_image_f32_init());
    *img_copy = *img;
    imgs.entries.push_back(std::move(img_copy));

    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}

bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs_c_ptr, float * vec) {
    const clip_image_f32_batch & imgs = *imgs_c_ptr;
    int batch_size = imgs.entries.size();

    // TODO @ngxson : implement batch size > 1 as a loop
    // we don't need true batching support because the cgraph will be big anyway
    if (batch_size != 1) {
        return false; // only support batch size of 1
    }

    // if buffers are not allocated, we need to do a warmup run to allocate them
    if (!ctx->is_allocated) {
        clip_model_loader::warmup(*ctx, *imgs_c_ptr);
    }

    // build the inference graph
    ctx->debug_print_tensors.clear();
    ggml_backend_sched_reset(ctx->sched.get());
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
    ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);

    // set inputs
    const auto & model   = ctx->model;
    const auto & hparams = model.hparams;

    const int image_size_width  = imgs.entries[0]->nx;
    const int image_size_height = imgs.entries[0]->ny;

    const int patch_size  = hparams.patch_size;
    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int n_pos = num_patches + (model.class_embedding ? 1 : 0);
    const int pos_w = image_size_width  / patch_size;
    const int pos_h = image_size_height / patch_size;

    const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl

    auto get_inp_tensor = [&gf](const char * name) {
        ggml_tensor * inp = ggml_graph_get_tensor(gf, name);
        if (inp == nullptr) {
            GGML_ABORT("Failed to get tensor %s", name);
        }
        if (!(inp->flags & GGML_TENSOR_FLAG_INPUT)) {
            GGML_ABORT("Tensor %s is not an input tensor", name);
        }
        return inp;
    };

    auto set_input_f32 = [&get_inp_tensor](const char * name, std::vector<float> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_F32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    auto set_input_i32 = [&get_inp_tensor](const char * name, std::vector<int32_t> & values) {
        ggml_tensor * cur = get_inp_tensor(name);
        GGML_ASSERT(cur->type == GGML_TYPE_I32);
        GGML_ASSERT(ggml_nelements(cur) == (int64_t)values.size());
        ggml_backend_tensor_set(cur, values.data(), 0, ggml_nbytes(cur));
    };

    // set input pixel values
    if (!imgs.is_audio) {
        size_t nelem = 0;
        for (const auto & img : imgs.entries) {
            nelem += img->nx * img->ny * 3;
        }
        std::vector<float> inp_raw(nelem);

        // layout of data (note: the channel dim is unrolled to better visualize the layout):
        //
        // ┌──W──┐
        // │ H   │   channel = R
        // ├─────┤   │
        // │ H   │   channel = G
        // ├─────┤   │
        // │ H   │   channel = B
        // └─────┘   │
        //   ────────┘ x B

        for (size_t i = 0; i < imgs.entries.size(); i++) {
            const int nx = imgs.entries[i]->nx;
            const int ny = imgs.entries[i]->ny;
            const int n  = nx * ny;

            for (int b = 0; b < batch_size; b++) {
                float * batch_entry = inp_raw.data() + b * (3*n);
                for (int y = 0; y < ny; y++) {
                    for (int x = 0; x < nx; x++) {
                        size_t base_src = 3*(y * nx + x); // idx of the first channel
                        size_t base_dst =    y * nx + x;  // idx of the first channel
                        batch_entry[      base_dst] = imgs.entries[b]->buf[base_src    ];
                        batch_entry[1*n + base_dst] = imgs.entries[b]->buf[base_src + 1];
                        batch_entry[2*n + base_dst] = imgs.entries[b]->buf[base_src + 2];
                    }
                }
            }
        }
        set_input_f32("inp_raw", inp_raw);
    } else {
        // audio input
        GGML_ASSERT(imgs.entries.size() == 1);
        const auto & mel_inp = imgs.entries[0];
        const int n_step = mel_inp->nx;
        const int n_mel  = mel_inp->ny;
        std::vector<float> inp_raw(n_step * n_mel);
        std::memcpy(inp_raw.data(), mel_inp->buf.data(), n_step * n_mel * sizeof(float));
        set_input_f32("inp_raw", inp_raw);
    }

    // set input per projector
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                // inspired from siglip:
                //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
                //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
                std::vector<int32_t> positions(pos_h * pos_w);
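                // bucket the patch coordinates into a fixed 70x70 grid and
                // flatten them row-major (h * 70 + w), matching the resampler's
                // learned 2D position table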
                int bucket_coords_h[1024];
                int bucket_coords_w[1024];
                for (int i = 0; i < pos_h; i++) {
                    bucket_coords_h[i] = std::floor(70.0*i/pos_h);
                }
                for (int i = 0; i < pos_w; i++) {
                    bucket_coords_w[i] = std::floor(70.0*i/pos_w);
                }
                for (int i = 0, id = 0; i < pos_h; i++) {
                    for (int j = 0; j < pos_w; j++) {
                        positions[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
                    }
                }
                set_input_i32("positions", positions);

                // inputs for resampler projector
                // set the 2D positions (using float for sinusoidal embedding)
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<float> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = static_cast<float>(i / n_patches_per_col);
                }
                set_input_f32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = static_cast<float>(i % n_patches_per_col);
                }
                set_input_f32("pos_w", pos_data);

                // base frequency omega
                const float base_freq = 10000.0f;
                const int n_embd_proj = clip_n_mmproj_embd(ctx);
                std::vector<float> omega(n_embd_proj / 4);
                for (int i = 0; i < n_embd_proj / 4; ++i) {
                    omega[i] = 1.0f / std::pow(base_freq, static_cast<float>(i) / (n_embd_proj / 4));
                }
                set_input_f32("omega", omega);
            } break;
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN3VL:
            {
                const int merge_ratio = hparams.n_merge;
                const int pw = image_size_width  / patch_size;
                const int ph = image_size_height / patch_size;
                std::vector<int> positions(n_pos * 4);
                int ptr = 0;
                for (int y = 0; y < ph; y += merge_ratio) {
                    for (int x = 0; x < pw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                positions[                  ptr] = y + dy;
                                positions[    num_patches + ptr] = x + dx;
                                positions[2 * num_patches + ptr] = y + dy;
                                positions[3 * num_patches + ptr] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }
                set_input_i32("positions", positions);
            } break;
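        // Qwen2.5VL uses windowed attention: the token order is permuted so that
        // all patches of one attention window are contiguous, and a block-diagonal
        // mask restricts attention to tokens within the same window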
        case PROJECTOR_TYPE_QWEN25VL:
            {
                // pw * ph   = number of tokens output by the ViT after applying the patch merger
                // ipw * iph = number of vision tokens processed inside the ViT
                const int merge_ratio = 2;
                const int pw  = image_size_width  / patch_size / merge_ratio;
                const int ph  = image_size_height / patch_size / merge_ratio;
                const int ipw = image_size_width  / patch_size;
                const int iph = image_size_height / patch_size;

                std::vector<int> idx    (ph * pw);
                std::vector<int> inv_idx(ph * pw);

                if (use_window_attn) {
                    const int attn_window_size = 112;
                    const int grid_window = attn_window_size / patch_size / merge_ratio;
                    int dst = 0;
                    // [num_vision_tokens, num_vision_tokens] attention mask tensor
                    std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
                    int mask_row = 0;

                    for (int y = 0; y < ph; y += grid_window) {
                        for (int x = 0; x < pw; x += grid_window) {
                            const int win_h = std::min(grid_window, ph - y);
                            const int win_w = std::min(grid_window, pw - x);
                            const int dst_0 = dst;
                            // group all tokens belonging to the same window together (into a contiguous range)
                            for (int dy = 0; dy < win_h; dy++) {
                                for (int dx = 0; dx < win_w; dx++) {
                                    const int src = (y + dy) * pw + (x + dx);
                                    GGML_ASSERT(src < (int)idx.size());
                                    GGML_ASSERT(dst < (int)inv_idx.size());
                                    idx    [src] = dst;
                                    inv_idx[dst] = src;
                                    dst++;
                                }
                            }

                            for (int r = 0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
                                int row_offset = mask_row * (ipw * iph);
                                std::fill(
                                    mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
                                    mask.begin() + row_offset + (dst   * merge_ratio * merge_ratio),
                                    0.0);
                                mask_row++;
                            }
                        }
                    }

                    set_input_i32("window_idx",     idx);
                    set_input_i32("inv_window_idx", inv_idx);
                    set_input_f32("window_mask",    mask);
                } else {
                    for (int i = 0; i < ph * pw; i++) {
                        idx[i] = i;
                    }
                }

                const int mpow = merge_ratio * merge_ratio;
                std::vector<int> positions(n_pos * 4);

                int ptr = 0;
                for (int y = 0; y < iph; y += merge_ratio) {
                    for (int x = 0; x < ipw; x += merge_ratio) {
                        for (int dy = 0; dy < 2; dy++) {
                            for (int dx = 0; dx < 2; dx++) {
                                auto remap = idx[ptr / mpow];
                                remap = (remap * mpow) + (ptr % mpow);

                                positions[                  remap] = y + dy;
                                positions[    num_patches + remap] = x + dx;
                                positions[2 * num_patches + remap] = y + dy;
                                positions[3 * num_patches + remap] = x + dx;
                                ptr++;
                            }
                        }
                    }
                }

                set_input_i32("positions", positions);
            } break;
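        // Pixtral-style models take separate row/column indices for 2D RoPE:
        // pos_h[i] is the patch row and pos_w[i] the patch column of token i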
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_KIMIVL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(n_pos);
                // dimension H
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i / n_patches_per_col;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < n_pos; i++) {
                    pos_data[i] = i % n_patches_per_col;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        case PROJECTOR_TYPE_GLM_EDGE:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);
            } break;
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_LDP:
        case PROJECTOR_TYPE_LDPV2:
            {
                // llava and other models
                std::vector<int32_t> positions(n_pos);
                for (int i = 0; i < n_pos; i++) {
                    positions[i] = i;
                }
                set_input_i32("positions", positions);

                // The patches vector is used to get rows to index into the embeds with;
                // we should skip dim 0 only if we have CLS to avoid going out of bounds
                // when retrieving the rows.
                int patch_offset = model.class_embedding ? 1 : 0;
                std::vector<int32_t> patches(num_patches);
                for (int i = 0; i < num_patches; i++) {
                    patches[i] = i + patch_offset;
                }
                set_input_i32("patches", patches);
            } break;
        case PROJECTOR_TYPE_GEMMA3:
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_INTERNVL:
        case PROJECTOR_TYPE_QWEN2A:
        case PROJECTOR_TYPE_GLMA:
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_VOXTRAL:
        case PROJECTOR_TYPE_JANUS_PRO:
        case PROJECTOR_TYPE_COGVLM:
            {
                // do nothing
            } break;
        case PROJECTOR_TYPE_LLAMA4:
            {
                // set the 2D positions
                int n_patches_per_col = image_size_width / patch_size;
                std::vector<int> pos_data(num_patches + 1, 0); // +1 for the [CLS] token
                // the last pos is always kept at 0, it is for CLS
                // dimension H
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i / n_patches_per_col) + 1;
                }
                set_input_i32("pos_h", pos_data);
                // dimension W
                for (int i = 0; i < num_patches; i++) {
                    pos_data[i] = (i % n_patches_per_col) + 1;
                }
                set_input_i32("pos_w", pos_data);
            } break;
        default:
            GGML_ABORT("Unknown projector type");
    }

    // ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
    ggml_backend_dev_t dev = ggml_backend_get_device(ctx->backend_cpu);
    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
    if (reg) {
        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (ggml_backend_set_n_threads_fn) {
            ggml_backend_set_n_threads_fn(ctx->backend_cpu, n_threads);
        }
    }

    auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
    if (status != GGML_STATUS_SUCCESS) {
        LOG_ERR("%s: ggml_backend_sched_graph_compute failed with error %d\n", __func__, status);
        return false;
    }

    // print debug nodes
    if (ctx->debug_graph) {
        LOG_INF("\n\n---\n\n");
        LOG_INF("\n\nDebug graph:\n\n");
        for (ggml_tensor * t : ctx->debug_print_tensors) {
            std::vector<uint8_t> data(ggml_nbytes(t));
            ggml_backend_tensor_get(t, data.data(), 0, ggml_nbytes(t));
            print_tensor_shape(t);
            print_tensor_data(t, data.data(), 3);
        }
    }

    // the last node is the embedding tensor
    ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // sanity check (only support batch size of 1 for now)
    const int n_tokens_out          = embeddings->ne[1];
    const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
    if (n_tokens_out != expected_n_tokens_out) {
        LOG_ERR("%s: expected output %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
        GGML_ABORT("Invalid number of output tokens");
    }

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}

int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_LDP:
            return ctx->model.mm_model_block_1_block_2_1_b->ne[0];
        case PROJECTOR_TYPE_LDPV2:
            return ctx->model.mm_model_peg_0_b->ne[0];
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_PIXTRAL:
        case PROJECTOR_TYPE_LIGHTONOCR:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_MLP_NORM:
            return ctx->model.mm_3_b->ne[0];
        case PROJECTOR_TYPE_MINICPMV:
            return ctx->model.mm_model_proj->ne[0];
        case PROJECTOR_TYPE_GLM_EDGE:
            return ctx->model.mm_model_mlp_3_w->ne[1];
        case PROJECTOR_TYPE_QWEN2VL:
        case PROJECTOR_TYPE_QWEN25VL:
        case PROJECTOR_TYPE_JANUS_PRO:
            return ctx->model.mm_1_b->ne[0];
        case PROJECTOR_TYPE_QWEN3VL:
            // main path + deepstack paths
            return ctx->model.mm_1_b->ne[0] * (1 + ctx->model.n_deepstack_layers);
        case PROJECTOR_TYPE_GEMMA3:
            return ctx->model.mm_input_proj_w->ne[0];
        case PROJECTOR_TYPE_IDEFICS3:
            return ctx->model.projection->ne[1];
        case PROJECTOR_TYPE_ULTRAVOX:
        case PROJECTOR_TYPE_VOXTRAL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_INTERNVL:
            return ctx->model.mm_3_w->ne[1];
        case PROJECTOR_TYPE_LLAMA4:
            return ctx->model.mm_model_proj->ne[1];
        case PROJECTOR_TYPE_QWEN2A:
            return ctx->model.mm_fc_w->ne[1];
        case PROJECTOR_TYPE_GLMA:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_KIMIVL:
            return ctx->model.mm_2_w->ne[1];
        case PROJECTOR_TYPE_COGVLM:
            return ctx->model.mm_4h_to_h_w->ne[1];
        default:
            GGML_ABORT("Unknown projector type");
    }
}

int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) {
        return ctx->model.hparams.minicpmv_version;
    }
    return 0;
}

bool clip_is_glm(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE;
}

bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN3VL;
}

bool clip_is_llava(const struct clip_ctx * ctx) {
    return ctx->model.hparams.has_llava_projector;
}

bool clip_is_gemma3(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_GEMMA3;
}

bool clip_has_vision_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_VISION;
}

bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
    return ctx->model.modality == CLIP_MODALITY_AUDIO;
}

bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
    return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX
        || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A
        || ctx->proj_type() == PROJECTOR_TYPE_GLMA
        || ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL;
}

bool clip_encode_float_image(struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
    clip_image_f32 clip_img;
    clip_img.buf.resize(h * w * 3);

    for (int i = 0; i < h * w * 3; i++) {
        clip_img.buf[i] = img[i];
    }

    clip_img.nx = w;
    clip_img.ny = h;

    // propagate the encoder status instead of unconditionally returning true
    return clip_image_encode(ctx, n_threads, &clip_img, vec);
}

//
// API used internally with mtmd
//

projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
    return ctx->proj_type();
}

void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
    clip_image_f32 * audio = new clip_image_f32;
    audio->nx = n_frames;
    audio->ny = n_mel;
    audio->buf.resize(n_frames * n_mel);
    std::memcpy(audio->buf.data(), mel, n_frames * n_mel * sizeof(float));

    batch->entries.push_back(clip_image_f32_ptr(audio));
    batch->is_audio = true;
}