// ggml-vulkan.cpp
#include "ggml-vulkan.h"

#ifdef GGML_VULKAN_RUN_TESTS
#include <chrono>
#endif

#include <vulkan/vulkan.hpp>

#include <algorithm>
#include <cmath>
#include <iostream>
#include <iomanip>
#include <limits>
#include <tuple>
#include <vector>
#include <sstream>
#include <utility>
#include <memory>

#include "ggml.h"
#include "ggml-backend-impl.h"

#include "ggml-vulkan-shaders.hpp"

#define VK_API_VERSION VK_API_VERSION_1_2

#define CEIL_DIV(M, N) (((M) + (N)-1) / (N))

#define VK_VENDOR_ID_AMD 0x1002
#define VK_VENDOR_ID_APPLE 0x106b
#define VK_VENDOR_ID_INTEL 0x8086
#define VK_VENDOR_ID_NVIDIA 0x10de

#define VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN 0
#define VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI 1
#define VK_DEVICE_DESCRIPTOR_POOL_MODE_SINGLE 2

#define VK_NUM_TYPES 16

#define GGML_VK_MAX_NODES 8192

#define MAX_VK_BUFFERS 256

#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 1
#else
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif

#define VK_CHECK(err, msg)                                          \
    do {                                                            \
        vk::Result err_ = (err);                                    \
        if (err_ != vk::Result::eSuccess) {                         \
            fprintf(stderr, "ggml_vulkan: %s error %s at %s:%d\n",  \
                #err, to_string(err_).c_str(), __FILE__, __LINE__); \
            exit(1);                                                \
        }                                                           \
    } while (0)
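
// Illustrative usage sketch (editor's addition, not from the original source): VK_CHECK wraps
// vulkan.hpp calls that return a vk::Result and aborts with a file/line-tagged message on failure.
// Waiting on the backend fence might look like:
//
//     VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "wait for fences");
//     ctx->device.lock()->device.resetFences({ ctx->fence });
//
// The first argument must be an expression of type vk::Result; note that the msg argument is not
// used by the macro body, which prints the stringified expression instead.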
struct ggml_backend_vk_context;

struct vk_queue {
    uint32_t queue_family_index;
    vk::Queue queue;
    vk::CommandPool pool;
    uint32_t cmd_buffer_idx;
    std::vector<vk::CommandBuffer> cmd_buffers;
    vk::PipelineStageFlags stage_flags;
};

struct vk_device {
    vk::PhysicalDevice physical_device;
    vk::PhysicalDeviceProperties properties;
    std::string name;
    uint64_t max_memory_allocation_size;
    bool fp16;
    vk::Device device;
    uint32_t vendor_id;
    vk_queue compute_queue;
    vk_queue transfer_queue;
    bool single_queue;
    uint32_t descriptor_set_mode;
    uint32_t subgroup_size;
    bool uma;

    ~vk_device() {
#ifdef GGML_VULKAN_DEBUG
        std::cerr << "destroy device " << name << std::endl;
#endif
        device.destroy();
    }
};

struct vk_buffer_struct {
    vk::Buffer buffer;
    vk::DeviceMemory device_memory;
    vk::MemoryPropertyFlags memory_property_flags;
    void * ptr;
    size_t size = 0;

    ggml_backend_vk_context * ctx;
    std::shared_ptr<vk_device> device;

    ~vk_buffer_struct() {
        if (size == 0) {
            return;
        }
#ifdef GGML_VULKAN_DEBUG
        std::cerr << "~vk_buffer_struct(" << buffer << ", " << size << ")" << std::endl;
#endif
        device->device.freeMemory(device_memory);
        device->device.destroyBuffer(buffer);
    }
};

typedef std::shared_ptr<vk_buffer_struct> vk_buffer;
typedef std::weak_ptr<vk_buffer_struct> vk_buffer_ref;
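
// Editor's note (illustrative sketch, not part of the original file): vk_buffer is the owning
// handle and vk_buffer_ref is a non-owning weak reference, so long-lived bookkeeping such as
// ggml_tensor_extra_gpu below stores a vk_buffer_ref and is expected to lock() it before use:
//
//     vk_buffer_ref ref = some_vk_buffer;   // does not extend the buffer's lifetime
//     if (vk_buffer buf = ref.lock()) {     // promotes to a shared_ptr if the buffer is still alive
//         // safe to use buf->buffer / buf->ptr here
//     }

struct vk_subbuffer {
    vk_buffer buffer;
    uint64_t offset;
    uint64_t size;
};

struct vk_pipeline {
    std::string name;
    vk::ShaderModule shader_module;
    vk::DescriptorSetLayout dsl;
    std::vector<vk::DescriptorPool> descriptor_pools;
    std::vector<vk::DescriptorSet> descriptor_sets;
    uint32_t descriptor_set_idx;
    vk::PipelineLayout layout;
    vk::Pipeline pipeline;
    uint32_t push_constant_size;
    uint32_t parameter_count;
    std::array<uint32_t, 3> wg_denoms;
    uint32_t align;
};

struct vk_semaphore {
    vk::Semaphore s;
    uint64_t value;
};

struct vk_submission {
    vk::CommandBuffer buffer;
    std::vector<vk_semaphore> wait_semaphores;
    std::vector<vk_semaphore> signal_semaphores;
};

typedef std::vector<vk_submission> vk_sequence;

struct vk_op_push_constants {
    uint32_t KX;
    uint32_t KY;
    float param1;
    float param2;
};

struct vk_op_cpy_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t nb00; uint32_t nb01; uint32_t nb02;
    uint32_t ne10; uint32_t ne11; uint32_t nb10; uint32_t nb11; uint32_t nb12;
    uint32_t d_offset;
};

struct vk_op_diag_mask_push_constants {
    uint32_t ncols;
    uint32_t rows_per_channel;
    int32_t n_past;
};

struct vk_op_rope_push_constants {
    uint32_t ncols;
    float freq_scale;
    uint32_t p_delta_rows;
    float freq_base;
    float ext_factor;
    float attn_factor;
    float corr_dims[4];
};

struct vk_op_rope_neox_push_constants {
    uint32_t ncols;
    uint32_t ndims;
    float freq_scale;
    uint32_t p_delta_rows;
    float freq_base;
    float ext_factor;
    float attn_factor;
    float corr_dims[4];
    float theta_scale;
    float inv_ndims;
};

// Allow pre-recording command buffers
struct vk_staging_memcpy {
    vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {}

    void * dst;
    const void * src;
    size_t n;
};

struct vk_context {
    size_t idx;

    vk_submission * s;
    std::vector<vk_sequence> seqs;

    ggml_tensor * exit_tensor;

    std::vector<vk_staging_memcpy> in_memcpys;
    std::vector<vk_staging_memcpy> out_memcpys;

    vk_queue * q;
};

struct ggml_tensor_extra_gpu {
    bool ready;

    size_t ctx_idx;

    vk_buffer_ref buffer_gpu;
    uint64_t offset;

    void reset() {
        ready = false;
        ctx_idx = 0;
        buffer_gpu.reset();
        offset = 0;
    }
};

struct ggml_vk_garbage_collector {
    std::vector<vk_pipeline *> pipelines;
    std::vector<vk_semaphore> tl_semaphores;
    std::vector<vk_semaphore> semaphores;
    std::vector<vk::Event> events;
    std::vector<vk_buffer> temp_buffers;
    std::vector<vk_context> contexts;
};

struct ggml_backend_vk_context {
    std::string name;

    std::weak_ptr<vk_device> device;
    vk_pipeline pipeline_matmul_f32_l, pipeline_matmul_f32_m, pipeline_matmul_f32_s;
    vk_pipeline pipeline_matmul_f32_aligned_l, pipeline_matmul_f32_aligned_m, pipeline_matmul_f32_aligned_s;
    vk_pipeline pipeline_matmul_f16_l, pipeline_matmul_f16_m, pipeline_matmul_f16_s;
    vk_pipeline pipeline_matmul_f16_aligned_l, pipeline_matmul_f16_aligned_m, pipeline_matmul_f16_aligned_s;
    vk_pipeline pipeline_matmul_f16_f32_l, pipeline_matmul_f16_f32_m, pipeline_matmul_f16_f32_s;
    vk_pipeline pipeline_matmul_f16_f32_aligned_l, pipeline_matmul_f16_f32_aligned_m, pipeline_matmul_f16_f32_aligned_s;
    vk_pipeline pipeline_matmul_split_k_reduce;
    vk_pipeline pipeline_dequant[VK_NUM_TYPES];
    vk_pipeline pipeline_dequant_mul_mat_vec_f32[VK_NUM_TYPES];
    vk_pipeline pipeline_mul_mat_vec_p021_f16_f32;
    vk_pipeline pipeline_mul_mat_vec_nc_f16_f32;
    vk_pipeline pipeline_get_rows[VK_NUM_TYPES];
    vk_pipeline pipeline_get_rows_f32[VK_NUM_TYPES];
    vk_pipeline pipeline_mul_f32;
    vk_pipeline pipeline_add_f32;
    vk_pipeline pipeline_scale_f32;
    vk_pipeline pipeline_sqr_f32;
    vk_pipeline pipeline_clamp_f32;
    vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16;
    vk_pipeline pipeline_norm_f32;
    vk_pipeline pipeline_rms_norm_f32;
    vk_pipeline pipeline_gelu_f32;
    vk_pipeline pipeline_silu_f32;
    vk_pipeline pipeline_relu_f32;
    vk_pipeline pipeline_diag_mask_inf_f32;
    vk_pipeline pipeline_soft_max_f32;
    vk_pipeline pipeline_rope_f32, pipeline_rope_f16;
    vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;

    size_t semaphore_idx, event_idx;
    ggml_vk_garbage_collector gc;
    std::vector<std::tuple<void*, size_t, vk_buffer>> pinned_memory;
    size_t prealloc_size_qx, prealloc_size_qy, prealloc_size_x, prealloc_size_y, prealloc_size_split_k;
    vk_buffer prealloc_qx, prealloc_qy, prealloc_x, prealloc_y, prealloc_split_k;
    vk::Fence fence;
    vk_buffer staging;
    size_t staging_size;
    size_t staging_offset;
    vk_buffer sync_staging;

    vk_buffer buffer_pool[MAX_VK_BUFFERS];

    vk_context * compute_ctx;
    vk_context * transfer_ctx;

    bool disable;
    bool initialized;

    size_t idx;
};

struct vk_instance {
    vk::Instance instance;

    std::vector<size_t> device_indices;

    std::shared_ptr<vk_device> devices[GGML_VK_MAX_DEVICES];

    ggml_backend_t backends[GGML_VK_MAX_DEVICES];
    ggml_backend_vk_context contexts[GGML_VK_MAX_DEVICES];
    ggml_backend_buffer_type buffer_types[GGML_VK_MAX_DEVICES];
    bool initialized[GGML_VK_MAX_DEVICES];
};

#ifdef GGML_VULKAN_CHECK_RESULTS
static size_t vk_skip_checks;
static size_t vk_output_tensor;

static void ggml_vk_print_tensor(ggml_backend * ctx, const ggml_tensor * tensor, const char * name);
static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor);
static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor);
#endif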
typedef void (*ggml_vk_func_t)(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);

static bool vk_instance_initialized = false;
static vk_instance vk_instance;

GGML_CALL static void ggml_backend_vk_free(ggml_backend_t backend);

static void ggml_vk_create_pipeline(ggml_backend_vk_context * ctx, vk_pipeline& pipeline, const std::string& name, size_t spv_size, const void* spv_data, const std::string& entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, std::vector<uint32_t>&& specialization_constants, uint32_t align) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_pipeline(" << name << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size << ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align << ")" << std::endl;
#endif
    GGML_ASSERT(parameter_count > 0);
    GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT

    pipeline.name = name;
    pipeline.parameter_count = parameter_count;
    pipeline.push_constant_size = push_constant_size;
    pipeline.wg_denoms = wg_denoms;
    pipeline.align = align;

    vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast<const uint32_t *>(spv_data));
    pipeline.shader_module = ctx->device.lock()->device.createShaderModule(shader_module_create_info);

    std::vector<vk::DescriptorSetLayoutBinding> dsl_binding;
    std::vector<vk::DescriptorBindingFlags> dsl_binding_flags;
    for (uint32_t i = 0; i < parameter_count; i++) {
        dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute});
        dsl_binding_flags.push_back({});
    }

    vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags };

    vk::PushConstantRange pcr(
        vk::ShaderStageFlagBits::eCompute,
        0,
        pipeline.push_constant_size
    );

    vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info(
        {},
        dsl_binding);
    descriptor_set_layout_create_info.setPNext(&dslbfci);
    pipeline.dsl = ctx->device.lock()->device.createDescriptorSetLayout(descriptor_set_layout_create_info);

    // Check if device supports multiple descriptors per pool
    if (ctx->device.lock()->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN) {
        const uint32_t alloc_count = 2;

        // Try allocating multiple sets from one pool
        // This fails on AMD for some reason, so add a fallback to allocating one pool per set
        vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline.parameter_count);
        vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, alloc_count, descriptor_pool_size);
        vk::DescriptorPool pool = ctx->device.lock()->device.createDescriptorPool(descriptor_pool_create_info);

        std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
        for (uint32_t i = 0; i < alloc_count; i++) {
            layouts[i] = pipeline.dsl;
        }
        try {
            vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pool, alloc_count, layouts.data());
            std::vector<vk::DescriptorSet> sets = ctx->device.lock()->device.allocateDescriptorSets(descriptor_set_alloc_info);
        } catch(vk::OutOfPoolMemoryError const&) {
            ctx->device.lock()->descriptor_set_mode = VK_DEVICE_DESCRIPTOR_POOL_MODE_SINGLE;
        }

        ctx->device.lock()->device.destroyDescriptorPool(pool);
    }

    if (ctx->device.lock()->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI) {
        vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline.parameter_count);
        vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, 128, descriptor_pool_size);
        pipeline.descriptor_pools.push_back(ctx->device.lock()->device.createDescriptorPool(descriptor_pool_create_info));
    }

    pipeline.descriptor_set_idx = 0;

    vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), pipeline.dsl, pcr);
    pipeline.layout = ctx->device.lock()->device.createPipelineLayout(pipeline_layout_create_info);

    std::vector<vk::SpecializationMapEntry> specialization_entries(specialization_constants.size());

    for (size_t i = 0; i < specialization_constants.size(); i++) {
        specialization_entries[i].constantID = i;
        specialization_entries[i].offset = i * sizeof(uint32_t);
        specialization_entries[i].size = sizeof(uint32_t);
    }

    vk::SpecializationInfo specialization_info(
        specialization_entries.size(),
        specialization_entries.data(),
        specialization_constants.size() * sizeof(uint32_t),
        specialization_constants.data()
    );

    vk::PipelineShaderStageCreateInfo pipeline_shader_create_info(
        vk::PipelineShaderStageCreateFlags(),
        vk::ShaderStageFlagBits::eCompute,
        pipeline.shader_module,
        entrypoint.c_str(),
        &specialization_info);
    vk::ComputePipelineCreateInfo compute_pipeline_create_info(
        vk::PipelineCreateFlags(),
        pipeline_shader_create_info,
        pipeline.layout);
    pipeline.pipeline = ctx->device.lock()->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;

    ctx->gc.pipelines.push_back(&pipeline);
}
static void ggml_vk_destroy_pipeline(ggml_backend_vk_context * ctx, vk_pipeline * pipeline) {
    for (auto& pool : pipeline->descriptor_pools) {
        ctx->device.lock()->device.destroyDescriptorPool(pool);
    }
    pipeline->descriptor_pools.clear();
    pipeline->descriptor_sets.clear();
    pipeline->descriptor_set_idx = 0;

    ctx->device.lock()->device.destroyDescriptorSetLayout(pipeline->dsl);
    ctx->device.lock()->device.destroyPipelineLayout(pipeline->layout);
    ctx->device.lock()->device.destroyShaderModule(pipeline->shader_module);
    ctx->device.lock()->device.destroyPipeline(pipeline->pipeline);
}

static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx, vk_pipeline& pipeline, uint32_t n) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_pipeline_allocate_descriptor_sets(" << pipeline.name << ", " << n << ")" << std::endl;
#endif
    if (pipeline.descriptor_sets.size() >= pipeline.descriptor_set_idx + n) {
        // Enough descriptors are available
        return;
    }

    if (ctx->device.lock()->descriptor_set_mode == VK_DEVICE_DESCRIPTOR_POOL_MODE_MULTI) {
        const uint32_t alloc_count = pipeline.descriptor_set_idx + n - pipeline.descriptor_sets.size();

        std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
        for (uint32_t i = 0; i < alloc_count; i++) {
            layouts[i] = pipeline.dsl;
        }
        vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline.descriptor_pools[0], alloc_count, layouts.data());
        std::vector<vk::DescriptorSet> sets = ctx->device.lock()->device.allocateDescriptorSets(descriptor_set_alloc_info);
        pipeline.descriptor_sets.insert(pipeline.descriptor_sets.end(), sets.begin(), sets.end());
    } else {
        for (uint32_t i = pipeline.descriptor_sets.size(); i < pipeline.descriptor_set_idx + n; i++) {
            vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline.parameter_count);
            vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, 1, descriptor_pool_size);
            pipeline.descriptor_pools.push_back(ctx->device.lock()->device.createDescriptorPool(descriptor_pool_create_info));

            vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline.descriptor_pools[i], 1, &pipeline.dsl);
            std::vector<vk::DescriptorSet> sets = ctx->device.lock()->device.allocateDescriptorSets(descriptor_set_alloc_info);
            pipeline.descriptor_sets.push_back(sets[0]);
        }
    }
}
static void ggml_pipeline_cleanup(vk_pipeline& pipeline) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_pipeline_cleanup(" << pipeline.name << ")" << std::endl;
#endif
    pipeline.descriptor_set_idx = 0;
}

static vk::CommandBuffer ggml_vk_create_cmd_buffer(ggml_backend_vk_context * ctx, vk_queue& q) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_cmd_buffer()" << std::endl;
#endif
    if (q.cmd_buffers.size() > q.cmd_buffer_idx) {
        // Reuse command buffer
        return q.cmd_buffers[q.cmd_buffer_idx++];
    }

    vk::CommandBufferAllocateInfo command_buffer_alloc_info(
        q.pool,
        vk::CommandBufferLevel::ePrimary,
        1);
    const std::vector<vk::CommandBuffer> cmd_buffers = ctx->device.lock()->device.allocateCommandBuffers(command_buffer_alloc_info);
    auto buf = cmd_buffers.front();

    q.cmd_buffers.push_back(buf);
    q.cmd_buffer_idx++;

    return buf;
}

static vk_submission ggml_vk_create_submission(ggml_backend_vk_context * ctx, vk_queue& q, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_submission()" << std::endl;
#endif
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(ctx, q);
    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
    return s;
}

static void ggml_vk_submit(vk_context * ctx, vk::Fence fence) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_submit(" << ctx->seqs.size() << ", " << fence << ")" << std::endl;
#endif
    if (ctx->seqs.empty()) {
        return;
    }

    std::vector<std::vector<uint64_t>> tl_wait_vals;
    std::vector<std::vector<uint64_t>> tl_signal_vals;
    std::vector<std::vector<vk::Semaphore>> tl_wait_semaphores;
    std::vector<std::vector<vk::Semaphore>> tl_signal_semaphores;
    std::vector<vk::TimelineSemaphoreSubmitInfo> tl_submit_infos;
    std::vector<vk::SubmitInfo> submit_infos;
    int idx = -1;
    std::vector<std::vector<vk::PipelineStageFlags>> stage_flags;

    size_t reserve = 0;

    for (const auto& sequence : ctx->seqs) {
        reserve += sequence.size();
    }

    // Pre-reserve vectors to prevent reallocation, which invalidates pointers
    tl_wait_semaphores.reserve(reserve);
    tl_wait_vals.reserve(reserve);
    tl_signal_semaphores.reserve(reserve);
    tl_signal_vals.reserve(reserve);
    tl_submit_infos.reserve(reserve);
    submit_infos.reserve(reserve);
    stage_flags.reserve(reserve);

    for (const auto& sequence : ctx->seqs) {
        for (const auto& submission : sequence) {
            stage_flags.push_back({});
            idx++;
            tl_wait_vals.push_back({});
            tl_wait_semaphores.push_back({});
            tl_signal_vals.push_back({});
            tl_signal_semaphores.push_back({});
            for (size_t i = 0; i < submission.wait_semaphores.size(); i++) {
                stage_flags[idx].push_back(ctx->q->stage_flags);
                tl_wait_vals[idx].push_back(submission.wait_semaphores[i].value);
                tl_wait_semaphores[idx].push_back(submission.wait_semaphores[i].s);
            }
            for (size_t i = 0; i < submission.signal_semaphores.size(); i++) {
                tl_signal_vals[idx].push_back(submission.signal_semaphores[i].value);
                tl_signal_semaphores[idx].push_back(submission.signal_semaphores[i].s);
            }
            tl_submit_infos.push_back({
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_vals[idx].data(),
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_vals[idx].data(),
            });
            tl_submit_infos[idx].sType = vk::StructureType::eTimelineSemaphoreSubmitInfo;
            tl_submit_infos[idx].pNext = nullptr;
            vk::SubmitInfo si{
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_semaphores[idx].data(),
                stage_flags[idx].data(),
                1,
                &submission.buffer,
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_semaphores[idx].data(),
            };
            si.setPNext(&tl_submit_infos[idx]);
            submit_infos.push_back(si);
        }
    }

    ctx->q->queue.submit(submit_infos, fence);

    ctx->seqs.clear();
}
static uint32_t ggml_vk_find_queue_family_index(std::vector<vk::QueueFamilyProperties>& queue_family_props, const vk::QueueFlags& required, const vk::QueueFlags& avoid, int32_t compute_index, uint32_t min_num_queues) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_find_queue_family_index()" << std::endl;
#endif
    const uint32_t qfsize = queue_family_props.size();

    // Try with avoid preferences first
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required && !(queue_family_props[i].queueFlags & avoid)) {
            return i;
        }
    }

    // Fall back to only required
    for (size_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to reusing compute queue
    for (size_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to ignoring min_num_queues
    for (size_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    std::cerr << "ggml_vulkan: No suitable queue family index found." << std::endl;

    for (auto &q_family : queue_family_props) {
        std::cerr << "Queue number: " + std::to_string(q_family.queueCount) << " flags: " + to_string(q_family.queueFlags) << std::endl;
    }
    abort();
}
static void ggml_vk_create_queue(ggml_backend_vk_context * ctx, vk_queue& q, uint32_t queue_family_index, uint32_t queue_index, vk::PipelineStageFlags&& stage_flags) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_queue()" << std::endl;
#endif
    q.queue_family_index = queue_family_index;

    vk::CommandPoolCreateInfo command_pool_create_info_compute(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), queue_family_index);
    q.pool = ctx->device.lock()->device.createCommandPool(command_pool_create_info_compute);

    q.cmd_buffer_idx = 0;

    q.queue = ctx->device.lock()->device.getQueue(queue_family_index, queue_index);

    q.stage_flags = stage_flags;
}

static vk_context * ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_queue& q) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_context()" << std::endl;
#endif
    ctx->gc.contexts.emplace_back();
    vk_context * result = &ctx->gc.contexts[ctx->gc.contexts.size() - 1];
    memset((void *) result, 0, sizeof(vk_context));
    result->idx = ctx->gc.contexts.size() - 1;
    result->q = &q;
    return result;
}

static vk_semaphore * ggml_vk_create_binary_semaphore(ggml_backend_vk_context * ctx) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_binary_semaphore()" << std::endl;
#endif
    vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eBinary, 0 };
    vk::SemaphoreCreateInfo ci{};
    ci.setPNext(&tci);
    vk::Semaphore semaphore = ctx->device.lock()->device.createSemaphore(ci);
    ctx->gc.semaphores.push_back({ semaphore, 0 });
    return &ctx->gc.semaphores[ctx->gc.semaphores.size() - 1];
}

static vk_semaphore * ggml_vk_create_timeline_semaphore(ggml_backend_vk_context * ctx) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_timeline_semaphore()" << std::endl;
#endif
    if (ctx->semaphore_idx >= ctx->gc.tl_semaphores.size()) {
        vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eTimeline, 0 };
        vk::SemaphoreCreateInfo ci{};
        ci.setPNext(&tci);
        vk::Semaphore semaphore = ctx->device.lock()->device.createSemaphore(ci);
        ctx->gc.tl_semaphores.push_back({ semaphore, 0 });
    }
    return &ctx->gc.tl_semaphores[ctx->semaphore_idx++];
}

static vk::Event ggml_vk_create_event(ggml_backend_vk_context * ctx) {
    if (ctx->event_idx >= ctx->gc.events.size()) {
        ctx->gc.events.push_back(ctx->device.lock()->device.createEvent({}));
    }
    return ctx->gc.events[ctx->event_idx++];
}
static void ggml_vk_queue_cleanup(ggml_backend_vk_context * ctx, vk_queue& q) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_queue_cleanup()" << std::endl;
#endif
    // Requires command buffers to be done
    ctx->device.lock()->device.resetCommandPool(q.pool);
    q.cmd_buffer_idx = 0;
}

static uint32_t find_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, vk::MemoryPropertyFlags flags) {
    for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) {
        vk::MemoryType memory_type = mem_props->memoryTypes[i];
        if ((mem_req->memoryTypeBits & ((uint64_t)1 << i)) &&
            (flags & memory_type.propertyFlags) == flags &&
            mem_props->memoryHeaps[memory_type.heapIndex].size >= mem_req->size) {
            return static_cast<uint32_t>(i);
        }
    }
    return UINT32_MAX;
}

static vk_buffer ggml_vk_create_buffer(ggml_backend_vk_context * ctx, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_buffer(" << size << ", " << to_string(req_flags) << ", " << to_string(fallback_flags) << ")" << std::endl;
#endif
    vk_buffer buf = std::make_shared<vk_buffer_struct>();

    if (size == 0) {
        buf->size = 0;
        return buf;
    }

    buf->size = size;
    vk::BufferCreateInfo buffer_create_info{
        vk::BufferCreateFlags(),
        size,
        vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
    };

    buf->buffer = ctx->device.lock()->device.createBuffer(buffer_create_info);

    vk::MemoryRequirements mem_req = ctx->device.lock()->device.getBufferMemoryRequirements(buf->buffer);

    vk::PhysicalDeviceMemoryProperties mem_props = ctx->device.lock()->physical_device.getMemoryProperties();

    uint32_t memory_type_index = UINT32_MAX;

    memory_type_index = find_properties(&mem_props, &mem_req, req_flags);
    buf->memory_property_flags = req_flags;

    if (memory_type_index == UINT32_MAX && fallback_flags) {
        memory_type_index = find_properties(&mem_props, &mem_req, fallback_flags);
        buf->memory_property_flags = fallback_flags;
    }

    if (memory_type_index == UINT32_MAX) {
        ctx->device.lock()->device.destroyBuffer(buf->buffer);
        buf->size = 0;
        throw vk::OutOfDeviceMemoryError("No suitable memory type found");
    }

    try {
        buf->device_memory = ctx->device.lock()->device.allocateMemory({ mem_req.size, memory_type_index });
    } catch (const vk::SystemError& e) {
        // Out of Host/Device memory, clean up buffer
        ctx->device.lock()->device.destroyBuffer(buf->buffer);
        buf->size = 0;
        throw e;
    }
    buf->ptr = nullptr;

    if (buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        buf->ptr = ctx->device.lock()->device.mapMemory(buf->device_memory, 0, VK_WHOLE_SIZE);
    }

    ctx->device.lock()->device.bindBufferMemory(buf->buffer, buf->device_memory, 0);

    buf->ctx = ctx;

    buf->device = ctx->device.lock();

#ifdef GGML_VULKAN_DEBUG
    std::cerr << "Created buffer " << buf->buffer << std::endl;
#endif

    return buf;
}
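
// Illustrative usage sketch (editor's addition, not from the original file): a host-visible staging
// buffer can be requested with cached host memory preferred and plain host-visible/coherent memory
// as the fallback, mirroring how req_flags and fallback_flags interact above. The size is an
// arbitrary example value.
//
//     vk_buffer staging = ggml_vk_create_buffer(ctx, 1024 * 1024,
//         vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
//         vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);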
static vk_buffer ggml_vk_create_buffer_check(ggml_backend_vk_context * ctx, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    try {
        return ggml_vk_create_buffer(ctx, size, req_flags, fallback_flags);
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }
}

static vk_buffer ggml_vk_create_buffer_device(ggml_backend_vk_context * ctx, size_t size) {
    vk_buffer buf;
    try {
        if (ctx->device.lock()->uma) {
            // Fall back to host memory type
            buf = ggml_vk_create_buffer(ctx, size, vk::MemoryPropertyFlagBits::eDeviceLocal, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
        } else {
            buf = ggml_vk_create_buffer(ctx, size, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Device memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }

    return buf;
}

static void ggml_vk_destroy_buffer(vk_buffer& buf) {
    buf.reset();
}

static vk_subbuffer ggml_vk_subbuffer(vk_buffer& buf) {
    return { buf, 0, VK_WHOLE_SIZE };
}

static void ggml_vk_sync_buffers(vk_context * ctx) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_sync_buffers()" << std::endl;
#endif
    const std::vector<vk::MemoryBarrier> mem_barriers{ { { vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite }, { vk::AccessFlagBits::eMemoryRead | vk::AccessFlagBits::eMemoryWrite } } };

    ctx->s->buffer.pipelineBarrier(
        ctx->q->stage_flags,
        ctx->q->stage_flags,
        {},
        mem_barriers,
        {},
        {}
    );
}

static void ggml_vk_wait_events(vk_context * ctx, std::vector<vk::Event>&& events) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_wait_events()" << std::endl;
#endif
    if (events.empty()) {
        return;
    }

    ctx->s->buffer.waitEvents(
        events,
        ctx->q->stage_flags,
        ctx->q->stage_flags,
        {},
        {},
        {}
    );
}

static bool ggml_vk_build_shader(ggml_type type) {
    switch(type) {
    case GGML_TYPE_F16:
    case GGML_TYPE_Q4_0:
    case GGML_TYPE_Q4_1:
    case GGML_TYPE_Q5_0:
    case GGML_TYPE_Q5_1:
    case GGML_TYPE_Q8_0:
    case GGML_TYPE_Q2_K:
    case GGML_TYPE_Q3_K:
    case GGML_TYPE_Q4_K:
    case GGML_TYPE_Q5_K:
    case GGML_TYPE_Q6_K:
        return true;
    default:
        return false;
    }
}
static void ggml_vk_load_shaders(ggml_backend_vk_context * ctx) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_load_shaders(" << ctx->name << ")" << std::endl;
#endif
    // mulmat
    std::initializer_list<uint32_t> warptile_l = { 128, 128, 128, 16, ctx->device.lock()->subgroup_size * 2, 64, 2, 4, 4, ctx->device.lock()->subgroup_size };
    std::initializer_list<uint32_t> warptile_m = { 128, 64, 64, 16, ctx->device.lock()->subgroup_size, 32, 2, 4, 2, ctx->device.lock()->subgroup_size };
    std::initializer_list<uint32_t> warptile_s = { ctx->device.lock()->subgroup_size, 32, 32, 16, 32, 32, 2, 2, 2, ctx->device.lock()->subgroup_size };

    std::array<uint32_t, 3> l_wg_denoms = {128, 128, 1 };
    std::array<uint32_t, 3> m_wg_denoms = { 64, 64, 1 };
    std::array<uint32_t, 3> s_wg_denoms = { 32, 32, 1 };

    uint32_t l_align = 128;
    uint32_t m_align = 64;
    uint32_t s_align = 32;

    if (ctx->device.lock()->fp16) {
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_l, "matmul_f32_l", matmul_f32_l_len, matmul_f32_l_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_m, "matmul_f32_m", matmul_f32_m_len, matmul_f32_m_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_s, "matmul_f32_s", matmul_f32_s_len, matmul_f32_s_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_aligned_l, "matmul_f32_aligned_l", matmul_f32_aligned_l_len, matmul_f32_aligned_l_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_aligned_m, "matmul_f32_aligned_m", matmul_f32_aligned_m_len, matmul_f32_aligned_m_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_aligned_s, "matmul_f32_aligned_s", matmul_f32_aligned_s_len, matmul_f32_aligned_s_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_l, "matmul_f16_l", matmul_f16_l_len, matmul_f16_l_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_m, "matmul_f16_m", matmul_f16_m_len, matmul_f16_m_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_s, "matmul_f16_s", matmul_f16_s_len, matmul_f16_s_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_aligned_l, "matmul_f16_aligned_l", matmul_f16_aligned_l_len, matmul_f16_aligned_l_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_aligned_m, "matmul_f16_aligned_m", matmul_f16_aligned_m_len, matmul_f16_aligned_m_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_aligned_s, "matmul_f16_aligned_s", matmul_f16_aligned_s_len, matmul_f16_aligned_s_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_l, "matmul_f16_f32_l", matmul_f16_f32_l_len, matmul_f16_f32_l_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_m, "matmul_f16_f32_m", matmul_f16_f32_m_len, matmul_f16_f32_m_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_s, "matmul_f16_f32_s", matmul_f16_f32_s_len, matmul_f16_f32_s_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_aligned_l, "matmul_f16_f32_aligned_l", matmul_f16_f32_aligned_l_len, matmul_f16_f32_aligned_l_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_aligned_m, "matmul_f16_f32_aligned_m", matmul_f16_f32_aligned_m_len, matmul_f16_f32_aligned_m_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_aligned_s, "matmul_f16_f32_aligned_s", matmul_f16_f32_aligned_s_len, matmul_f16_f32_aligned_s_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, s_align);
    } else {
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_l, "matmul_f32_l", matmul_f32_l_fp32_len, matmul_f32_l_fp32_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_m, "matmul_f32_m", matmul_f32_m_fp32_len, matmul_f32_m_fp32_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_s, "matmul_f32_s", matmul_f32_s_fp32_len, matmul_f32_s_fp32_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, 1);
  767. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_aligned_l, "matmul_f32_aligned_l", matmul_f32_aligned_l_fp32_len, matmul_f32_aligned_l_fp32_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, l_align);
  768. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_aligned_m, "matmul_f32_aligned_m", matmul_f32_aligned_m_fp32_len, matmul_f32_aligned_m_fp32_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, m_align);
  769. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f32_aligned_s, "matmul_f32_aligned_s", matmul_f32_aligned_s_fp32_len, matmul_f32_aligned_s_fp32_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, s_align);
  770. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_l, "matmul_f16_l", matmul_f16_l_fp32_len, matmul_f16_l_fp32_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, 1);
  771. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_m, "matmul_f16_m", matmul_f16_m_fp32_len, matmul_f16_m_fp32_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, 1);
  772. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_s, "matmul_f16_s", matmul_f16_s_fp32_len, matmul_f16_s_fp32_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, 1);
  773. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_aligned_l, "matmul_f16_aligned_l", matmul_f16_aligned_l_fp32_len, matmul_f16_aligned_l_fp32_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, l_align);
  774. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_aligned_m, "matmul_f16_aligned_m", matmul_f16_aligned_m_fp32_len, matmul_f16_aligned_m_fp32_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, m_align);
  775. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_aligned_s, "matmul_f16_aligned_s", matmul_f16_aligned_s_fp32_len, matmul_f16_aligned_s_fp32_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, s_align);
  776. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_l, "matmul_f16_f32_l", matmul_f16_f32_l_fp32_len, matmul_f16_f32_l_fp32_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, 1);
  777. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_m, "matmul_f16_f32_m", matmul_f16_f32_m_fp32_len, matmul_f16_f32_m_fp32_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, 1);
  778. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_s, "matmul_f16_f32_s", matmul_f16_f32_s_fp32_len, matmul_f16_f32_s_fp32_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, 1);
  779. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_aligned_l, "matmul_f16_f32_aligned_l", matmul_f16_f32_aligned_l_fp32_len, matmul_f16_f32_aligned_l_fp32_data, "main", 3, 14 * sizeof(uint32_t), l_wg_denoms, warptile_l, l_align);
  780. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_aligned_m, "matmul_f16_f32_aligned_m", matmul_f16_f32_aligned_m_fp32_len, matmul_f16_f32_aligned_m_fp32_data, "main", 3, 14 * sizeof(uint32_t), m_wg_denoms, warptile_m, m_align);
  781. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_f16_f32_aligned_s, "matmul_f16_f32_aligned_s", matmul_f16_f32_aligned_s_fp32_len, matmul_f16_f32_aligned_s_fp32_data, "main", 3, 14 * sizeof(uint32_t), s_wg_denoms, warptile_s, s_align);
  782. }
  783. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f32", mul_mat_vec_f16_f32_len, mul_mat_vec_f16_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  784. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32", mul_mat_vec_q4_0_f32_len, mul_mat_vec_q4_0_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  785. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32", mul_mat_vec_q4_1_f32_len, mul_mat_vec_q4_1_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  786. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32", mul_mat_vec_q5_0_f32_len, mul_mat_vec_q5_0_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  787. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32", mul_mat_vec_q5_1_f32_len, mul_mat_vec_q5_1_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  788. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32", mul_mat_vec_q8_0_f32_len, mul_mat_vec_q8_0_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  789. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_K_f32", mul_mat_vec_q2_K_f32_len, mul_mat_vec_q2_K_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  790. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_K_f32", mul_mat_vec_q3_K_f32_len, mul_mat_vec_q3_K_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  791. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_K_f32", mul_mat_vec_q4_K_f32_len, mul_mat_vec_q4_K_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  792. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_K_f32", mul_mat_vec_q5_K_f32_len, mul_mat_vec_q5_K_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  793. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant_mul_mat_vec_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_K_f32", mul_mat_vec_q6_K_f32_len, mul_mat_vec_q6_K_f32_data, "main", 3, 3 * sizeof(int), {1, 1, 1}, {}, 1);
  794. // dequant shaders
  795. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", f32_to_f16_len, f32_to_f16_data, "main", 2, 4 * sizeof(int), { 64, 1, 1}, {}, 1);
  796. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_F16 ], "dequant_f16", dequant_f16_len, dequant_f16_data, "main", 2, 4 * sizeof(int), {256 * 32, 1, 1}, {}, 1);
  797. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q4_0], "dequant_q4_0", dequant_q4_0_len, dequant_q4_0_data, "main", 2, 4 * sizeof(int), {256 * 32, 1, 1}, {}, 1);
  798. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q4_1], "dequant_q4_1", dequant_q4_1_len, dequant_q4_1_data, "main", 2, 4 * sizeof(int), {256 * 32, 1, 1}, {}, 1);
  799. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q5_0], "dequant_q5_0", dequant_q5_0_len, dequant_q5_0_data, "main", 2, 4 * sizeof(int), {256 * 32, 1, 1}, {}, 1);
  800. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q5_1], "dequant_q5_1", dequant_q5_1_len, dequant_q5_1_data, "main", 2, 4 * sizeof(int), {256 * 32, 1, 1}, {}, 1);
  801. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q8_0], "dequant_q8_0", dequant_q8_0_len, dequant_q8_0_data, "main", 2, 4 * sizeof(int), {256 * 32, 1, 1}, {}, 1);
  802. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q2_K], "dequant_q2_K", dequant_q2_K_len, dequant_q2_K_data, "main", 2, 4 * sizeof(int), {256 * 64, 1, 1}, {}, 1);
  803. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q3_K], "dequant_q3_K", dequant_q3_K_len, dequant_q3_K_data, "main", 2, 4 * sizeof(int), {256 * 64, 1, 1}, {}, 1);
  804. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q4_K], "dequant_q4_K", dequant_q4_K_len, dequant_q4_K_data, "main", 2, 4 * sizeof(int), {256 * 32, 1, 1}, {}, 1);
  805. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q5_K], "dequant_q5_K", dequant_q5_K_len, dequant_q5_K_data, "main", 2, 4 * sizeof(int), {256 * 64, 1, 1}, {}, 1);
  806. ggml_vk_create_pipeline(ctx, ctx->pipeline_dequant[GGML_TYPE_Q6_K], "dequant_q6_K", dequant_q6_K_len, dequant_q6_K_data, "main", 2, 4 * sizeof(int), {256 * 64, 1, 1}, {}, 1);
  807. // get_rows
  808. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows[GGML_TYPE_F16 ], "get_rows_f16", get_rows_f16_len, get_rows_f16_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  809. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows[GGML_TYPE_Q4_0], "get_rows_q4_0", get_rows_q4_0_len, get_rows_q4_0_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  810. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows[GGML_TYPE_Q4_1], "get_rows_q4_1", get_rows_q4_1_len, get_rows_q4_1_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  811. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows[GGML_TYPE_Q5_0], "get_rows_q5_0", get_rows_q5_0_len, get_rows_q5_0_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  812. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows[GGML_TYPE_Q5_1], "get_rows_q5_1", get_rows_q5_1_len, get_rows_q5_1_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  813. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows[GGML_TYPE_Q8_0], "get_rows_q8_0", get_rows_q8_0_len, get_rows_q8_0_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  814. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f16_f32", get_rows_f16_f32_len, get_rows_f16_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  815. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows_f32[GGML_TYPE_Q4_0], "get_rows_q4_0_f32", get_rows_q4_0_f32_len, get_rows_q4_0_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  816. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows_f32[GGML_TYPE_Q4_1], "get_rows_q4_1_f32", get_rows_q4_1_f32_len, get_rows_q4_1_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  817. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows_f32[GGML_TYPE_Q5_0], "get_rows_q5_0_f32", get_rows_q5_0_f32_len, get_rows_q5_0_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  818. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows_f32[GGML_TYPE_Q5_1], "get_rows_q5_1_f32", get_rows_q5_1_f32_len, get_rows_q5_1_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  819. ggml_vk_create_pipeline(ctx, ctx->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  820. ggml_vk_create_pipeline(ctx, ctx->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256, 1, 1}, {}, 1);
  821. ggml_vk_create_pipeline(ctx, ctx->pipeline_mul_mat_vec_p021_f16_f32, "mul_mat_vec_p021_f16_f32", mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
  822. ggml_vk_create_pipeline(ctx, ctx->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 3, 7 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
  823. ggml_vk_create_pipeline(ctx, ctx->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
  824. ggml_vk_create_pipeline(ctx, ctx->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
  825. ggml_vk_create_pipeline(ctx, ctx->pipeline_cpy_f32_f32, "cpy_f32_f32", cpy_f32_f32_len, cpy_f32_f32_data, "main", 2, sizeof(vk_op_cpy_push_constants), {512, 1, 1}, {}, 1);
  826. ggml_vk_create_pipeline(ctx, ctx->pipeline_cpy_f32_f16, "cpy_f32_f16", cpy_f32_f16_len, cpy_f32_f16_data, "main", 2, sizeof(vk_op_cpy_push_constants), {512, 1, 1}, {}, 1);
  827. ggml_vk_create_pipeline(ctx, ctx->pipeline_cpy_f16_f16, "cpy_f16_f16", cpy_f16_f16_len, cpy_f16_f16_data, "main", 2, sizeof(vk_op_cpy_push_constants), {512, 1, 1}, {}, 1);
  828. ggml_vk_create_pipeline(ctx, ctx->pipeline_add_f32, "add_f32", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  829. ggml_vk_create_pipeline(ctx, ctx->pipeline_mul_f32, "mul_f32", mul_f32_len, mul_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  830. ggml_vk_create_pipeline(ctx, ctx->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  831. ggml_vk_create_pipeline(ctx, ctx->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  832. ggml_vk_create_pipeline(ctx, ctx->pipeline_clamp_f32, "clamp_f32", clamp_f32_len, clamp_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  833. ggml_vk_create_pipeline(ctx, ctx->pipeline_gelu_f32, "gelu_f32", gelu_f32_len, gelu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  834. ggml_vk_create_pipeline(ctx, ctx->pipeline_silu_f32, "silu_f32", silu_f32_len, silu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  835. ggml_vk_create_pipeline(ctx, ctx->pipeline_relu_f32, "relu_f32", relu_f32_len, relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
  836. ggml_vk_create_pipeline(ctx, ctx->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {512, 1, 1}, {}, 1);
  837. ggml_vk_create_pipeline(ctx, ctx->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
  838. ggml_vk_create_pipeline(ctx, ctx->pipeline_rope_f32, "rope_f32", rope_f32_len, rope_f32_data, "main", 3, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
  839. ggml_vk_create_pipeline(ctx, ctx->pipeline_rope_f16, "rope_f16", rope_f16_len, rope_f16_data, "main", 3, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
  840. ggml_vk_create_pipeline(ctx, ctx->pipeline_rope_neox_f32, "rope_neox_f32", rope_neox_f32_len, rope_neox_f32_data, "main", 3, sizeof(vk_op_rope_neox_push_constants), {1, 512, 1}, {}, 1);
  841. ggml_vk_create_pipeline(ctx, ctx->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, rope_neox_f16_data, "main", 3, sizeof(vk_op_rope_neox_push_constants), {1, 512, 1}, {}, 1);
  842. }
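// Prints a one-line summary (device name, UMA, fp16 support, subgroup/warp size)
// for the physical device behind the given backend index, using the same extension
// and feature-chain queries as ggml_vk_init below.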
  843. static void ggml_vk_print_gpu_info(size_t idx) {
  844. GGML_ASSERT(idx < vk_instance.device_indices.size());
  845. size_t dev_num = vk_instance.device_indices[idx];
  846. #ifdef GGML_VULKAN_DEBUG
  847. std::cerr << "ggml_vk_print_gpu_info(" << dev_num << ")" << std::endl;
  848. #endif
  849. GGML_ASSERT(vk_instance.initialized);
  850. std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();
  851. if (dev_num >= devices.size()) {
  852. std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
  853. throw std::runtime_error("Device not found");
  854. }
  855. vk::PhysicalDevice physical_device = devices[dev_num];
  856. std::vector<vk::ExtensionProperties> ext_props = physical_device.enumerateDeviceExtensionProperties();
  857. vk::PhysicalDeviceProperties2 props2;
  858. vk::PhysicalDeviceMaintenance3Properties props3;
  859. vk::PhysicalDeviceSubgroupProperties subgroup_props;
  860. props2.pNext = &props3;
  861. props3.pNext = &subgroup_props;
  862. physical_device.getProperties2(&props2);
  863. const size_t subgroup_size = subgroup_props.subgroupSize;
  864. const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;
  865. bool fp16_storage = false;
  866. bool fp16_compute = false;
  867. for (auto properties : ext_props) {
  868. if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
  869. fp16_storage = true;
  870. } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
  871. fp16_compute = true;
  872. }
  873. }
  874. const char* GGML_VULKAN_DISABLE_F16 = getenv("GGML_VULKAN_DISABLE_F16");
  875. bool force_disable_f16 = GGML_VULKAN_DISABLE_F16 != nullptr;
  876. bool fp16 = !force_disable_f16 && fp16_storage && fp16_compute;
  877. vk::PhysicalDeviceFeatures device_features = physical_device.getFeatures();
  878. VkPhysicalDeviceFeatures2 device_features2;
  879. device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
  880. device_features2.pNext = nullptr;
  881. device_features2.features = (VkPhysicalDeviceFeatures)device_features;
  882. VkPhysicalDeviceVulkan11Features vk11_features;
  883. vk11_features.pNext = nullptr;
  884. vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
  885. device_features2.pNext = &vk11_features;
  886. VkPhysicalDeviceVulkan12Features vk12_features;
  887. vk12_features.pNext = nullptr;
  888. vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
  889. vk11_features.pNext = &vk12_features;
  890. vkGetPhysicalDeviceFeatures2(physical_device, &device_features2);
  891. fp16 = fp16 && vk12_features.shaderFloat16;
  892. std::string device_name = props2.properties.deviceName.data();
  893. std::cerr << GGML_VK_NAME << idx << ": " << device_name << " | uma: " << uma << " | fp16: " << fp16 << " | warp size: " << subgroup_size << std::endl;
  894. if (props2.properties.deviceType == vk::PhysicalDeviceType::eCpu) {
  895. std::cerr << "ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want." << std::endl;
  896. }
  897. }
  898. static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
  899. static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
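// One-time creation of the global Vulkan instance: enables the validation layer and
// VK_EXT_validation_features when available, handles portability enumeration on Apple
// platforms, and fills vk_instance.device_indices either from GGML_VK_VISIBLE_DEVICES
// (comma-separated device indices, in the spirit of CUDA_VISIBLE_DEVICES) or with
// device 0 as the default.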
  900. void ggml_vk_instance_init() {
  901. if (vk_instance_initialized) {
  902. return;
  903. }
  904. #ifdef GGML_VULKAN_DEBUG
  905. std::cerr << "ggml_vk_instance_init()" << std::endl;
  906. #endif
  907. vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, VK_API_VERSION };
  908. const std::vector<vk::ExtensionProperties> instance_extensions = vk::enumerateInstanceExtensionProperties();
  909. const bool validation_ext = ggml_vk_instance_validation_ext_available(instance_extensions);
  910. #ifdef __APPLE__
  911. const bool portability_enumeration_ext = ggml_vk_instance_portability_enumeration_ext_available(instance_extensions);
  912. #endif
  913. std::vector<const char*> layers;
  914. if (validation_ext) {
  915. layers.push_back("VK_LAYER_KHRONOS_validation");
  916. }
  917. std::vector<const char*> extensions;
  918. if (validation_ext) {
  919. extensions.push_back("VK_EXT_validation_features");
  920. }
  921. #ifdef __APPLE__
  922. if (portability_enumeration_ext) {
  923. extensions.push_back("VK_KHR_portability_enumeration");
  924. }
  925. #endif
  926. vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags{}, &app_info, layers, extensions);
  927. #ifdef __APPLE__
  928. if (portability_enumeration_ext) {
  929. instance_create_info.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
  930. }
  931. #endif
  932. std::vector<vk::ValidationFeatureEnableEXT> features_enable;
  933. vk::ValidationFeaturesEXT validation_features;
  934. if (validation_ext) {
  935. features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices };
  936. validation_features = {
  937. features_enable,
  938. {},
  939. };
  940. validation_features.setPNext(nullptr);
  941. instance_create_info.setPNext(&validation_features);
  942. std::cerr << "ggml_vulkan: Validation layers enabled" << std::endl;
  943. }
  944. vk_instance.instance = vk::createInstance(instance_create_info);
  945. memset(vk_instance.initialized, 0, sizeof(bool) * GGML_VK_MAX_DEVICES);
  946. size_t num_available_devices = vk_instance.instance.enumeratePhysicalDevices().size();
  947. // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan
  948. char * devices_env = getenv("GGML_VK_VISIBLE_DEVICES");
  949. if (devices_env != nullptr) {
  950. std::string devices(devices_env);
  951. std::replace(devices.begin(), devices.end(), ',', ' ');
  952. std::stringstream ss(devices);
  953. size_t tmp;
  954. while (ss >> tmp) {
  955. if(tmp >= num_available_devices) {
  956. std::cerr << "ggml_vulkan: Invalid device index " << tmp << " in GGML_VK_VISIBLE_DEVICES." << std::endl;
  957. throw std::runtime_error("Invalid Vulkan device index");
  958. }
  959. vk_instance.device_indices.push_back(tmp);
  960. }
  961. } else {
  962. vk_instance.device_indices.push_back(0);
  963. }
  964. vk_instance_initialized = true;
  965. }
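// Per-device initialization: selects the physical device, queries maintenance3/4
// limits and the subgroup size, checks the 16-bit storage and fp16 extensions
// (GGML_VULKAN_DISABLE_F16 can force fp16 off), picks separate compute and transfer
// queue families where the hardware offers them, creates the logical device, loads
// all shader pipelines, and sets up the fence and remaining per-context state.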
  966. static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
  967. GGML_ASSERT(idx < vk_instance.device_indices.size());
  968. size_t dev_num = vk_instance.device_indices[idx];
  969. #ifdef GGML_VULKAN_DEBUG
  970. std::cerr << "ggml_vk_init(" << ctx->name << ", " << dev_num << ")" << std::endl;
  971. #endif
  972. ggml_vk_instance_init();
  973. std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();
  974. if (dev_num >= devices.size()) {
  975. std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
  976. throw std::runtime_error("Device not found");
  977. }
  978. vk_instance.devices[idx] = std::make_shared<vk_device>();
  979. ctx->device = vk_instance.devices[idx];
  980. ctx->device.lock()->physical_device = devices[dev_num];
  981. const std::vector<vk::ExtensionProperties> ext_props = ctx->device.lock()->physical_device.enumerateDeviceExtensionProperties();
  982. bool maintenance4_support = false;
  983. // Check if maintenance4 is supported
  984. for (const auto& properties : ext_props) {
  985. if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) {
  986. maintenance4_support = true;
  987. }
  988. }
  989. vk::PhysicalDeviceProperties2 props2;
  990. vk::PhysicalDeviceMaintenance3Properties props3;
  991. vk::PhysicalDeviceMaintenance4Properties props4;
  992. vk::PhysicalDeviceSubgroupProperties subgroup_props;
  993. props2.pNext = &props3;
  994. props3.pNext = &subgroup_props;
  995. if (maintenance4_support) {
  996. subgroup_props.pNext = &props4;
  997. }
  998. ctx->device.lock()->physical_device.getProperties2(&props2);
  999. ctx->device.lock()->properties = props2.properties;
  1000. if (maintenance4_support) {
  1001. ctx->device.lock()->max_memory_allocation_size = std::min(props3.maxMemoryAllocationSize, props4.maxBufferSize);
  1002. } else {
  1003. ctx->device.lock()->max_memory_allocation_size = props3.maxMemoryAllocationSize;
  1004. }
  1005. ctx->device.lock()->vendor_id = ctx->device.lock()->properties.vendorID;
  1006. ctx->device.lock()->subgroup_size = subgroup_props.subgroupSize;
  1007. ctx->device.lock()->uma = ctx->device.lock()->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;
  1008. bool fp16_storage = false;
  1009. bool fp16_compute = false;
  1010. for (const auto& properties : ext_props) {
  1011. if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
  1012. fp16_storage = true;
  1013. } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
  1014. fp16_compute = true;
  1015. }
  1016. }
  1017. const char* GGML_VULKAN_DISABLE_F16 = getenv("GGML_VULKAN_DISABLE_F16");
  1018. bool force_disable_f16 = GGML_VULKAN_DISABLE_F16 != nullptr;
  1019. ctx->device.lock()->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;
  1020. std::vector<vk::QueueFamilyProperties> queue_family_props = ctx->device.lock()->physical_device.getQueueFamilyProperties();
  1021. // Try to find a non-graphics compute queue and transfer-focused queues
  1022. const uint32_t compute_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eCompute, vk::QueueFlagBits::eGraphics, -1, 1);
  1023. const uint32_t transfer_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eTransfer, vk::QueueFlagBits::eCompute | vk::QueueFlagBits::eGraphics, compute_queue_family_index, 1);
  1024. const float priorities[] = { 1.0f, 1.0f };
  1025. ctx->device.lock()->single_queue = compute_queue_family_index == transfer_queue_family_index && queue_family_props[compute_queue_family_index].queueCount == 1;
  1026. std::vector<vk::DeviceQueueCreateInfo> device_queue_create_infos;
  1027. if (compute_queue_family_index != transfer_queue_family_index) {
  1028. device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
  1029. device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), transfer_queue_family_index, 1, priorities + 1});
  1030. } else if(!ctx->device.lock()->single_queue) {
  1031. device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 2, priorities});
  1032. } else {
  1033. device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
  1034. }
  1035. vk::DeviceCreateInfo device_create_info;
  1036. std::vector<const char *> device_extensions;
  1037. vk::PhysicalDeviceFeatures device_features = ctx->device.lock()->physical_device.getFeatures();
  1038. VkPhysicalDeviceFeatures2 device_features2;
  1039. device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
  1040. device_features2.pNext = nullptr;
  1041. device_features2.features = (VkPhysicalDeviceFeatures)device_features;
  1042. VkPhysicalDeviceVulkan11Features vk11_features;
  1043. vk11_features.pNext = nullptr;
  1044. vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
  1045. device_features2.pNext = &vk11_features;
  1046. VkPhysicalDeviceVulkan12Features vk12_features;
  1047. vk12_features.pNext = nullptr;
  1048. vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
  1049. vk11_features.pNext = &vk12_features;
  1050. vkGetPhysicalDeviceFeatures2(ctx->device.lock()->physical_device, &device_features2);
  1051. ctx->device.lock()->fp16 = ctx->device.lock()->fp16 && vk12_features.shaderFloat16;
  1052. if (!vk11_features.storageBuffer16BitAccess) {
  1053. std::cerr << "ggml_vulkan: device " << GGML_VK_NAME << idx << " does not support 16-bit storage." << std::endl;
  1054. throw std::runtime_error("Unsupported device");
  1055. }
  1056. device_extensions.push_back("VK_KHR_16bit_storage");
  1057. #ifdef GGML_VULKAN_VALIDATE
  1058. device_extensions.push_back("VK_KHR_shader_non_semantic_info");
  1059. #endif
  1060. if (ctx->device.lock()->fp16) {
  1061. device_extensions.push_back("VK_KHR_shader_float16_int8");
  1062. }
  1063. ctx->device.lock()->name = ctx->device.lock()->properties.deviceName.data();
  1064. device_create_info = {
  1065. vk::DeviceCreateFlags(),
  1066. device_queue_create_infos,
  1067. {},
  1068. device_extensions
  1069. };
  1070. device_create_info.setPNext(&device_features2);
  1071. ctx->device.lock()->device = ctx->device.lock()->physical_device.createDevice(device_create_info);
  1072. ctx->device.lock()->descriptor_set_mode = VK_DEVICE_DESCRIPTOR_POOL_MODE_UNKNOWN;
  1073. // Shaders
  1074. ggml_vk_load_shaders(ctx);
  1075. // Queues
  1076. ggml_vk_create_queue(ctx, ctx->device.lock()->compute_queue, compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader | vk::PipelineStageFlagBits::eTransfer });
  1077. if (!ctx->device.lock()->single_queue) {
  1078. const uint32_t transfer_queue_index = compute_queue_family_index == transfer_queue_family_index ? 1 : 0;
  1079. ggml_vk_create_queue(ctx, ctx->device.lock()->transfer_queue, transfer_queue_family_index, transfer_queue_index, { vk::PipelineStageFlagBits::eTransfer });
  1080. } else {
  1081. // TODO: Use pointer or reference to avoid copy
  1082. ctx->device.lock()->transfer_queue = ctx->device.lock()->compute_queue;
  1083. }
  1084. ctx->fence = ctx->device.lock()->device.createFence({});
  1085. ctx->compute_ctx = nullptr;
  1086. ctx->transfer_ctx = nullptr;
  1087. ctx->disable = false;
  1088. ctx->initialized = true;
  1089. ctx->idx = idx;
  1090. #ifdef GGML_VULKAN_CHECK_RESULTS
  1091. const char* skip_checks = getenv("GGML_VULKAN_SKIP_CHECKS");
  1092. vk_skip_checks = (skip_checks == NULL ? 0 : atoi(skip_checks));
  1093. const char* output_tensor = getenv("GGML_VULKAN_OUTPUT_TENSOR");
  1094. vk_output_tensor = (output_tensor == NULL ? 0 : atoi(output_tensor));
  1095. #endif
  1096. }
  1097. static vk_pipeline* ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type type) {
  1098. #ifdef GGML_VULKAN_DEBUG
  1099. std::cerr << "ggml_vk_get_to_fp16()" << std::endl;
  1100. #endif
  1101. switch (type) {
  1102. case GGML_TYPE_F32:
  1103. case GGML_TYPE_Q4_0:
  1104. case GGML_TYPE_Q4_1:
  1105. case GGML_TYPE_Q5_0:
  1106. case GGML_TYPE_Q5_1:
  1107. case GGML_TYPE_Q8_0:
  1108. case GGML_TYPE_Q2_K:
  1109. case GGML_TYPE_Q3_K:
  1110. case GGML_TYPE_Q4_K:
  1111. case GGML_TYPE_Q5_K:
  1112. case GGML_TYPE_Q6_K:
  1113. break;
  1114. default:
  1115. return nullptr;
  1116. }
  1117. return &ctx->pipeline_dequant[type];
  1118. }
  1119. static vk_pipeline* ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type type) {
  1120. #ifdef GGML_VULKAN_DEBUG
  1121. std::cerr << "ggml_vk_get_dequantize_mul_mat_vec()" << std::endl;
  1122. #endif
  1123. switch (type) {
  1124. case GGML_TYPE_F16:
  1125. case GGML_TYPE_Q4_0:
  1126. case GGML_TYPE_Q4_1:
  1127. case GGML_TYPE_Q5_0:
  1128. case GGML_TYPE_Q5_1:
  1129. case GGML_TYPE_Q8_0:
  1130. case GGML_TYPE_Q2_K:
  1131. case GGML_TYPE_Q3_K:
  1132. case GGML_TYPE_Q4_K:
  1133. case GGML_TYPE_Q5_K:
  1134. case GGML_TYPE_Q6_K:
  1135. break;
  1136. default:
  1137. return nullptr;
  1138. }
  1139. return &ctx->pipeline_dequant_mul_mat_vec_f32[type];
  1140. }
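// Simple device-buffer pool: hand out the smallest free buffer that is large enough;
// if none fits, free the largest pooled buffer and allocate a fresh device-local
// buffer of the requested size.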
  1141. static vk_buffer ggml_vk_pool_malloc(ggml_backend_vk_context * ctx, size_t size) {
  1142. #ifdef GGML_VULKAN_DEBUG
  1143. std::cerr << "ggml_vk_pool_malloc(" << size << ")" << std::endl;
  1144. #endif
  1145. int best_i = -1;
  1146. size_t best_size = std::numeric_limits<size_t>::max(); //smallest unused buffer that fits our needs
  1147. int worst_i = -1;
  1148. size_t worst_size = 0; //largest unused buffer seen so far
  1149. for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
  1150. vk_buffer &b = ctx->buffer_pool[i];
  1151. if (b != nullptr && b->size >= size && b->size < best_size) {
  1152. best_i = i;
  1153. best_size = b->size;
  1154. }
  1155. if (b != nullptr && b->size > worst_size) {
  1156. worst_i = i;
  1157. worst_size = b->size;
  1158. }
  1159. }
  1160. if(best_i != -1) {
  1161. //found the smallest buffer that fits our needs
  1162. vk_buffer b = ctx->buffer_pool[best_i];
  1163. ctx->buffer_pool[best_i].reset();
  1164. return b;
  1165. }
  1166. if(worst_i != -1) {
1167. // no buffer fits, so free the largest pooled buffer to make room; a fresh one of the requested size is allocated below
  1168. vk_buffer& b = ctx->buffer_pool[worst_i];
  1169. ggml_vk_destroy_buffer(b);
  1170. }
  1171. return ggml_vk_create_buffer_check(ctx, size, vk::MemoryPropertyFlagBits::eDeviceLocal);
  1172. }
  1173. static void ggml_vk_pool_free(ggml_backend_vk_context * ctx, vk_buffer& buffer) {
  1174. #ifdef GGML_VULKAN_DEBUG
  1175. std::cerr << "ggml_vk_pool_free(" << buffer->size << ")" << std::endl;
  1176. #endif
  1177. for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
  1178. vk_buffer& b = ctx->buffer_pool[i];
  1179. if (b == nullptr) {
  1180. b = buffer;
  1181. return;
  1182. }
  1183. }
  1184. std::cerr << "ggml_vulkan: WARNING: vk buffer pool full, increase MAX_VK_BUFFERS" << std::endl;
  1185. ggml_vk_destroy_buffer(buffer);
  1186. }
1187. // Returns an available temporary buffer. It may only be used for a short time, since it will be reused for later operations
  1188. static vk_buffer ggml_vk_create_buffer_temp(ggml_backend_vk_context * ctx, size_t size) {
  1189. // Try to find existing temp buffer with enough capacity
  1190. for (auto& buffer : ctx->gc.temp_buffers) {
  1191. if (buffer->size >= size) {
  1192. return buffer;
  1193. }
  1194. }
  1195. // Otherwise create new buffer
  1196. vk_buffer buf = ggml_vk_pool_malloc(ctx, size);
  1197. ctx->gc.temp_buffers.push_back(buf);
  1198. return buf;
  1199. }
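// Allocates pinned host memory through a Vulkan buffer so later transfers can use it
// directly as a staging area. It requests host-visible, coherent and (preferably)
// cached memory, apparently with the second flag set acting as a fallback. Returns
// the mapped pointer, or nullptr if no host-visible allocation could be obtained;
// successful allocations are tracked in ctx->pinned_memory.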
  1200. static void * ggml_vk_host_malloc(ggml_backend_vk_context * ctx, size_t size) {
  1201. #ifdef GGML_VULKAN_DEBUG
  1202. std::cerr << "ggml_vk_host_malloc(" << size << ")" << std::endl;
  1203. #endif
  1204. vk_buffer buf = ggml_vk_create_buffer(ctx, size,
  1205. vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
  1206. vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
  1207. if(!(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible)) {
  1208. fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory\n",
  1209. size/1024.0/1024.0);
  1210. ctx->device.lock()->device.freeMemory(buf->device_memory);
  1211. ctx->device.lock()->device.destroyBuffer(buf->buffer);
  1212. return nullptr;
  1213. }
  1214. ctx->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf));
  1215. return buf->ptr;
  1216. }
  1217. static void ggml_vk_host_free(ggml_backend_vk_context * ctx, void* ptr) {
  1218. if (ptr == nullptr) {
  1219. return;
  1220. }
  1221. #ifdef GGML_VULKAN_DEBUG
  1222. std::cerr << "ggml_vk_host_free(" << ptr << ")" << std::endl;
  1223. #endif
  1224. vk_buffer buf;
  1225. size_t index;
  1226. for (size_t i = 0; i < ctx->pinned_memory.size(); i++) {
  1227. const uint8_t* addr = (const uint8_t*) std::get<0>(ctx->pinned_memory[i]);
  1228. const uint8_t* endr = addr + std::get<1>(ctx->pinned_memory[i]);
  1229. if (ptr >= addr && ptr < endr) {
  1230. buf = std::get<2>(ctx->pinned_memory[i]);
  1231. index = i;
  1232. break;
  1233. }
  1234. }
  1235. if (buf == nullptr) {
  1236. fprintf(stderr, "WARNING: failed to free pinned memory: memory not in map\n");
  1237. return;
  1238. }
  1239. ggml_vk_destroy_buffer(buf);
  1240. ctx->pinned_memory.erase(ctx->pinned_memory.begin() + index);
  1241. }
  1242. static void ggml_vk_host_get(ggml_backend_vk_context * ctx, const void * ptr, vk_buffer& buf, size_t& buf_offset) {
  1243. buf = nullptr;
  1244. buf_offset = 0;
  1245. for (size_t i = 0; i < ctx->pinned_memory.size(); i++) {
  1246. const uint8_t* addr = (const uint8_t*) std::get<0>(ctx->pinned_memory[i]);
  1247. const uint8_t* endr = addr + std::get<1>(ctx->pinned_memory[i]);
  1248. if (ptr >= addr && ptr < endr) {
  1249. buf = std::get<2>(ctx->pinned_memory[i]);
  1250. buf_offset = ((const uint8_t *)ptr) - addr;
  1251. break;
  1252. }
  1253. }
  1254. }
  1255. static vk_submission ggml_vk_begin_submission(ggml_backend_vk_context * ctx, vk_queue& q, bool one_time = true) {
  1256. vk_submission s;
  1257. s.buffer = ggml_vk_create_cmd_buffer(ctx, q);
  1258. if (one_time) {
  1259. s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
  1260. } else {
  1261. s.buffer.begin({ vk::CommandBufferUsageFlags{} });
  1262. }
  1263. return s;
  1264. }
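// Consumes one pre-allocated descriptor set per call (descriptor_set_idx advances),
// writes the given buffers as storage-buffer bindings, pushes the push-constant block,
// binds the pipeline and dispatches. Workgroup counts are the requested element counts
// divided, rounding up, by the pipeline's wg_denoms.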
  1265. static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context * ctx, vk_context * subctx, vk_pipeline& pipeline, std::vector<vk_subbuffer>&& buffers, size_t push_constant_size, const void* push_constants, std::array<uint32_t, 3> elements) {
  1266. const uint32_t wg0 = CEIL_DIV(elements[0], pipeline.wg_denoms[0]);
  1267. const uint32_t wg1 = CEIL_DIV(elements[1], pipeline.wg_denoms[1]);
  1268. const uint32_t wg2 = CEIL_DIV(elements[2], pipeline.wg_denoms[2]);
  1269. #ifdef GGML_VULKAN_DEBUG
  1270. std::cerr << "ggml_vk_dispatch_pipeline(" << pipeline.name << ", (" << wg0 << "," << wg1 << "," << wg2 << "))" << std::endl;
  1271. #endif
  1272. std::vector<vk::DescriptorBufferInfo> descriptor_buffer_infos;
  1273. std::vector<vk::WriteDescriptorSet> write_descriptor_sets;
  1274. GGML_ASSERT(pipeline.descriptor_set_idx < pipeline.descriptor_sets.size());
  1275. GGML_ASSERT(buffers.size() == pipeline.parameter_count);
  1276. vk::DescriptorSet& descriptor_set = pipeline.descriptor_sets[pipeline.descriptor_set_idx++];
  1277. for (uint32_t i = 0; i < pipeline.parameter_count; i++) {
  1278. descriptor_buffer_infos.push_back({buffers[i].buffer->buffer, buffers[i].offset, buffers[i].size});
  1279. }
  1280. for (uint32_t i = 0; i < pipeline.parameter_count; i++) {
  1281. write_descriptor_sets.push_back({descriptor_set, i, 0, 1, vk::DescriptorType::eStorageBuffer, nullptr, &descriptor_buffer_infos[i]});
  1282. }
  1283. ctx->device.lock()->device.updateDescriptorSets(write_descriptor_sets, {});
  1284. subctx->s->buffer.pushConstants(pipeline.layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size, push_constants);
  1285. subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline.pipeline);
  1286. subctx->s->buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute,
  1287. pipeline.layout,
  1288. 0,
  1289. { descriptor_set },
  1290. {});
  1291. subctx->s->buffer.dispatch(wg0, wg1, wg2);
  1292. }
  1293. static void ggml_vk_end_submission(vk_submission& s, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
  1294. s.buffer.end();
  1295. s.wait_semaphores = std::move(wait_semaphores);
  1296. s.signal_semaphores = std::move(signal_semaphores);
  1297. }
  1298. static void ggml_vk_ctx_end(vk_context * ctx) {
  1299. #ifdef GGML_VULKAN_DEBUG
  1300. std::cerr << "ggml_vk_ctx_end(" << ctx << ", " << ctx->seqs.size() << ")" << std::endl;
  1301. #endif
  1302. if (ctx->s == nullptr) {
  1303. return;
  1304. }
  1305. ctx->s->buffer.end();
  1306. ctx->s = nullptr;
  1307. }
  1308. static void ggml_vk_ctx_begin(ggml_backend_vk_context * ctx, vk_context * subctx) {
  1309. #ifdef GGML_VULKAN_DEBUG
  1310. std::cerr << "ggml_vk_ctx_begin(" << ctx << ")" << std::endl;
  1311. #endif
  1312. if (subctx->s != nullptr) {
  1313. ggml_vk_ctx_end(subctx);
  1314. }
  1315. subctx->seqs.push_back({ ggml_vk_begin_submission(ctx, *subctx->q) });
  1316. subctx->s = subctx->seqs[subctx->seqs.size() - 1].data();
  1317. }
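// Rounds width up to the next multiple of align, e.g. ggml_vk_align_size(100, 64) == 128,
// assuming CEIL_DIV is the usual round-up integer division defined elsewhere in this file.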
  1318. static size_t ggml_vk_align_size(size_t width, size_t align) {
  1319. return CEIL_DIV(width, align) * align;
  1320. }
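// Either performs the host memcpy immediately or records it into the given list so the
// caller can run all host-side copies in one place: in_memcpys are executed right before
// the submission that consumes the staging data, out_memcpys after the GPU work has
// completed (see ggml_vk_buffer_write_2d and ggml_vk_buffer_read below).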
  1321. static void deferred_memcpy(void * dst, const void * src, size_t size, std::vector<vk_staging_memcpy>* memcpys = nullptr) {
  1322. if (memcpys == nullptr) {
  1323. memcpy(dst, src, size);
  1324. } else {
  1325. memcpys->emplace_back(dst, src, size);
  1326. }
  1327. }
  1328. static void ggml_vk_ensure_sync_staging_buffer(ggml_backend_vk_context * ctx, size_t size) {
  1329. if (ctx->sync_staging == nullptr || ctx->sync_staging->size < size) {
  1330. ggml_vk_destroy_buffer(ctx->sync_staging);
  1331. ctx->sync_staging = ggml_vk_create_buffer_check(ctx, size,
  1332. vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
  1333. vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
  1334. }
  1335. }
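// Asynchronous host-to-device upload of a non-contiguous tensor. If the source lies in
// pinned memory it is used directly as the copy source, with one vk::BufferCopy per
// contiguous run (plane, row or element). Otherwise the data is packed into the
// context's staging buffer via deferred memcpys and a single buffer copy is recorded.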
  1336. static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& dst, size_t offset, const ggml_tensor * tensor, bool sync_staging = false) {
  1337. #ifdef GGML_VULKAN_DEBUG
  1338. std::cerr << "ggml_vk_buffer_write_nc_async(" << tensor << ")" << std::endl;
  1339. #endif
  1340. GGML_ASSERT(!ggml_is_contiguous(tensor));
  1341. // Buffer is already mapped
  1342. if(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
  1343. std::cerr << "ggml_vulkan: buffer_write_nc_async dst buffer is host_visible. Use synchronous write." << std::endl;
  1344. GGML_ASSERT(false);
  1345. }
  1346. // Check if src is pinned memory
  1347. vk_buffer buf;
  1348. size_t buf_offset;
  1349. ggml_vk_host_get(ctx, tensor->data, buf, buf_offset);
  1350. const uint64_t ne0 = tensor->ne[0];
  1351. const uint64_t ne1 = tensor->ne[1];
  1352. const uint64_t ne2 = tensor->ne[2];
  1353. const uint64_t ne3 = tensor->ne[3];
  1354. const uint64_t nb0 = tensor->nb[0];
  1355. const uint64_t nb1 = tensor->nb[1];
  1356. const uint64_t nb2 = tensor->nb[2];
  1357. const uint64_t nb3 = tensor->nb[3];
  1358. const ggml_type type = tensor->type;
  1359. const uint64_t ts = ggml_type_size(type);
  1360. const uint64_t bs = ggml_blck_size(type);
  1361. const uint64_t dstnb0 = ts;
  1362. const uint64_t dstnb1 = dstnb0*(ne0/bs);
  1363. const uint64_t dstnb2 = dstnb1*ne1;
  1364. const uint64_t dstnb3 = dstnb2*ne2;
  1365. const uint64_t ne = ggml_nelements(tensor);
  1366. if (buf != nullptr) {
  1367. // Memory is pinned, use as staging buffer
  1368. std::vector<vk::BufferCopy> slices;
  1369. for (uint64_t i3 = 0; i3 < ne3; i3++) {
  1370. for (uint64_t i2 = 0; i2 < ne2; i2++) {
  1371. // Find longest contiguous slice
  1372. if (ne1*nb1 == dstnb2) {
  1373. slices.push_back({ buf_offset + i3*nb3 + i2*nb2, offset + i3*dstnb3 + i2*dstnb2, dstnb2 });
  1374. } else {
  1375. for (uint64_t i1 = 0; i1 < ne1; i1++) {
  1376. if (ne0*nb0/bs == dstnb1) {
  1377. slices.push_back({ buf_offset + i3*nb3 + i2*nb2 + i1*nb1, offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, dstnb1 });
  1378. } else {
  1379. const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
  1380. const uint64_t d_off = offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
  1381. for (uint64_t i0 = 0; i0 < ne0; i0++) {
1382. slices.push_back({ s_off + i0*nb0, d_off + i0*dstnb0, dstnb0 }); // copy element i0, matching the staging path below
  1383. }
  1384. }
  1385. }
  1386. }
  1387. }
  1388. }
  1389. ggml_vk_sync_buffers(subctx);
  1390. subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
  1391. return;
  1392. }
  1393. // Staging buffer required
  1394. vk_buffer staging = ctx->staging;
  1395. size_t staging_offset = ctx->staging_offset;
  1396. const size_t copy_size = ts*ne/bs;
1397. if (ctx->staging == nullptr || ctx->staging->size < ctx->staging_offset + copy_size) {
  1398. if (sync_staging) {
  1399. // Create temporary larger buffer
  1400. ggml_vk_ensure_sync_staging_buffer(ctx, copy_size);
  1401. staging = ctx->sync_staging;
  1402. staging_offset = 0;
  1403. } else {
  1404. GGML_ASSERT(false);
  1405. }
  1406. }
  1407. VkBufferCopy buf_copy{ staging_offset, offset, copy_size };
  1408. ggml_vk_sync_buffers(subctx);
  1409. vkCmdCopyBuffer(subctx->s->buffer, staging->buffer, dst->buffer, 1, &buf_copy);
  1410. for (uint64_t i3 = 0; i3 < ne3; i3++) {
  1411. for (uint64_t i2 = 0; i2 < ne2; i2++) {
  1412. // Find longest contiguous slice
  1413. if (ne1*nb1 == dstnb2) {
  1414. deferred_memcpy((uint8_t *)staging->ptr + staging_offset + i3*dstnb3 + i2*dstnb2, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2, dstnb2, &subctx->in_memcpys);
  1415. } else {
  1416. for (uint64_t i1 = 0; i1 < ne1; i1++) {
  1417. if (ne0*nb0/bs == dstnb1) {
  1418. deferred_memcpy((uint8_t *)staging->ptr + staging_offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2 + i1*nb1, dstnb1, &subctx->in_memcpys);
  1419. } else {
  1420. const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
  1421. const uint64_t d_off = staging_offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
  1422. for (uint64_t i0 = 0; i0 < ne0; i0++) {
  1423. deferred_memcpy((uint8_t *)staging->ptr + d_off + i0*dstnb0, (const uint8_t *) tensor->data + s_off + i0*nb0, dstnb0, &subctx->in_memcpys);
  1424. }
  1425. }
  1426. }
  1427. }
  1428. }
  1429. }
  1430. }
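// Asynchronous 2D upload: height rows of width bytes with source pitch spitch, stored
// tightly packed in the destination buffer. Pinned sources are copied directly;
// everything else goes through the staging buffer (or the synchronous staging buffer
// when sync_staging is set) with the host memcpys deferred into subctx->in_memcpys.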
  1431. static void ggml_vk_buffer_write_2d_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height, bool sync_staging = false) {
  1432. #ifdef GGML_VULKAN_DEBUG
  1433. std::cerr << "ggml_vk_buffer_write_2d_async(" << width << ", " << height << ")" << std::endl;
  1434. #endif
  1435. // Make sure ctx owns the buffer
  1436. GGML_ASSERT(dst->ctx == ctx);
  1437. // Buffer is already mapped
  1438. if(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
  1439. std::cerr << "ggml_vulkan: buffer_write_async dst buffer is host_visible. Use synchronous write." << std::endl;
  1440. GGML_ASSERT(false);
  1441. }
  1442. // Check if src is pinned memory
  1443. vk_buffer buf = nullptr;
  1444. size_t buf_offset;
  1445. ggml_vk_host_get(ctx, src, buf, buf_offset);
  1446. if (buf != nullptr) {
  1447. // Memory is pinned, use as staging buffer
  1448. std::vector<vk::BufferCopy> slices(1);
  1449. if (width == spitch) {
1450. // Only do a single write if the strides match
  1451. slices[0].srcOffset = buf_offset;
  1452. slices[0].dstOffset = offset;
  1453. slices[0].size = width * height;
  1454. } else {
  1455. slices.resize(height);
  1456. for (size_t i = 0; i < height; i++) {
  1457. slices[i].srcOffset = buf_offset + i * spitch;
  1458. slices[i].dstOffset = offset + i * width;
  1459. slices[i].size = width;
  1460. }
  1461. }
  1462. ggml_vk_sync_buffers(subctx);
  1463. subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
  1464. return;
  1465. }
  1466. #ifdef GGML_VULKAN_DEBUG
  1467. std::cerr << "STAGING" << std::endl;
  1468. #endif
  1469. // Staging buffer required
  1470. vk_buffer staging = ctx->staging;
  1471. size_t staging_offset = ctx->staging_offset;
  1472. const size_t copy_size = width*height;
  1473. if (ctx->staging == nullptr || ctx->staging->size < ctx->staging_offset + copy_size) {
  1474. if (sync_staging) {
  1475. ggml_vk_ensure_sync_staging_buffer(ctx, copy_size);
  1476. staging = ctx->sync_staging;
  1477. staging_offset = 0;
  1478. } else {
  1479. GGML_ASSERT(false);
  1480. }
  1481. }
  1482. VkBufferCopy buf_copy = {
  1483. staging_offset,
  1484. offset,
  1485. copy_size};
  1486. ggml_vk_sync_buffers(subctx);
  1487. vkCmdCopyBuffer(subctx->s->buffer, staging->buffer, dst->buffer, 1, &buf_copy);
  1488. if (width == spitch) {
  1489. deferred_memcpy((uint8_t *)staging->ptr + staging_offset, src, width * height, &subctx->in_memcpys);
  1490. } else {
  1491. for (size_t i = 0; i < height; i++) {
  1492. deferred_memcpy((uint8_t *)staging->ptr + staging_offset + i * width, (const uint8_t *) src + i * spitch, width, &subctx->in_memcpys);
  1493. }
  1494. }
  1495. }
  1496. static void ggml_vk_buffer_write_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& dst, size_t offset, const void * src, size_t size, bool sync_staging = false) {
  1497. #ifdef GGML_VULKAN_DEBUG
  1498. std::cerr << "ggml_vk_buffer_write_async(" << size << ")" << std::endl;
  1499. #endif
  1500. return ggml_vk_buffer_write_2d_async(ctx, subctx, dst, offset, src, size, size, 1, sync_staging);
  1501. }
  1502. static void ggml_vk_buffer_write_2d(ggml_backend_vk_context * ctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height) {
  1503. #ifdef GGML_VULKAN_DEBUG
  1504. std::cerr << "ggml_vk_buffer_write_2d(" << width << ", " << height << ")" << std::endl;
  1505. #endif
  1506. // Buffer is already mapped
  1507. if(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
  1508. GGML_ASSERT(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);
  1509. for (size_t i = 0; i < height; i++) {
  1510. memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width);
  1511. }
  1512. } else {
  1513. vk_context * subctx = ggml_vk_create_context(ctx, ctx->device.lock()->transfer_queue);
  1514. ggml_vk_ctx_begin(ctx, subctx);
  1515. ggml_vk_buffer_write_2d_async(ctx, subctx, dst, offset, src, spitch, width, height, true);
  1516. ggml_vk_ctx_end(subctx);
  1517. for (auto& cpy : subctx->in_memcpys) {
  1518. memcpy(cpy.dst, cpy.src, cpy.n);
  1519. }
  1520. ggml_vk_submit(subctx, ctx->fence);
  1521. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "vk_buffer_write_2d waitForFences");
  1522. ctx->device.lock()->device.resetFences({ ctx->fence });
  1523. }
  1524. }
  1525. static void ggml_vk_buffer_write(ggml_backend_vk_context * ctx, vk_buffer& dst, size_t offset, const void * src, size_t size) {
  1526. #ifdef GGML_VULKAN_DEBUG
  1527. std::cerr << "ggml_vk_buffer_write(" << size << ")" << std::endl;
  1528. #endif
  1529. ggml_vk_buffer_write_2d(ctx, dst, offset, src, 0, size, 1);
  1530. }
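// Asynchronous device-to-host read of a 2D region (source pitch spitch, destination
// pitch dpitch). Pinned destinations are written directly; other destinations are read
// into the staging buffer and the final host memcpy is deferred into
// subctx->out_memcpys, to be executed once the GPU copy has finished.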
  1531. static void ggml_vk_buffer_read_2d_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, bool sync_staging = false) {
  1532. #ifdef GGML_VULKAN_DEBUG
  1533. std::cerr << "ggml_vk_buffer_read_2d_async(offset=" << offset << ", width=" << width << ", height=" << height << ")" << std::endl;
  1534. #endif
  1535. GGML_ASSERT(width > 0);
  1536. GGML_ASSERT(height > 0);
  1537. GGML_ASSERT(src != nullptr);
  1538. // Make sure ctx owns the buffer
  1539. GGML_ASSERT(src->ctx == ctx);
  1540. // Check if dst is pinned memory
  1541. vk_buffer buf = nullptr;
  1542. size_t buf_offset;
  1543. ggml_vk_host_get(ctx, dst, buf, buf_offset);
  1544. std::vector<vk::BufferCopy> slices(1);
  1545. if (width == spitch && width == dpitch) {
1546. // Only do a single copy if the strides match
  1547. slices[0].srcOffset = offset;
  1548. slices[0].dstOffset = buf_offset;
  1549. slices[0].size = width * height;
  1550. } else {
  1551. slices.resize(height);
  1552. for (size_t i = 0; i < height; i++) {
  1553. slices[i].srcOffset = offset + i * spitch;
  1554. slices[i].dstOffset = buf_offset + i * dpitch;
  1555. slices[i].size = width;
  1556. }
  1557. }
  1558. if (buf != nullptr) {
  1559. // Memory is pinned, use as staging buffer
  1560. ggml_vk_sync_buffers(subctx);
  1561. subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices);
  1562. return;
  1563. }
  1564. #ifdef GGML_VULKAN_DEBUG
  1565. std::cerr << "STAGING" << std::endl;
  1566. #endif
  1567. // Fall back to staging buffer
  1568. vk_buffer staging = ctx->staging;
  1569. const size_t copy_size = dpitch * height;
  1570. if (ctx->staging == nullptr || ctx->staging->size < ctx->staging_offset + copy_size) {
  1571. if (sync_staging) {
  1572. // Create temporary larger buffer
  1573. ggml_vk_ensure_sync_staging_buffer(ctx, copy_size);
  1574. staging = ctx->sync_staging;
  1575. } else {
  1576. GGML_ASSERT(false);
  1577. }
  1578. }
  1579. ggml_vk_sync_buffers(subctx);
  1580. subctx->s->buffer.copyBuffer(src->buffer, staging->buffer, slices);
  1581. deferred_memcpy(dst, staging->ptr, copy_size, &subctx->out_memcpys);
  1582. }
  1583. static void ggml_vk_buffer_read_async(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& src, size_t offset, void * dst, size_t size, bool sync_staging = false) {
  1584. return ggml_vk_buffer_read_2d_async(ctx, subctx, src, offset, dst, size, size, size, 1, sync_staging);
  1585. }
  1586. static void ggml_vk_buffer_read(ggml_backend_vk_context * ctx, vk_buffer& src, size_t offset, void * dst, size_t size) {
  1587. #ifdef GGML_VULKAN_DEBUG
  1588. std::cerr << "ggml_vk_buffer_read(" << offset << ", " << size << ")" << std::endl;
  1589. #endif
  1590. if(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
  1591. GGML_ASSERT(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);
  1592. memcpy(dst, (uint8_t *) src->ptr + offset, size);
  1593. } else {
  1594. vk_context * subctx = ggml_vk_create_context(ctx, ctx->device.lock()->transfer_queue);
  1595. ggml_vk_ctx_begin(ctx, subctx);
  1596. ggml_vk_buffer_read_async(ctx, subctx, src, offset, dst, size, true);
  1597. ggml_vk_ctx_end(subctx);
  1598. ggml_vk_submit(subctx, ctx->fence);
  1599. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "vk_buffer_read waitForFences");
  1600. ctx->device.lock()->device.resetFences({ ctx->fence });
  1601. for (auto& cpy : subctx->out_memcpys) {
  1602. memcpy(cpy.dst, cpy.src, cpy.n);
  1603. }
  1604. }
  1605. }
  1606. static void ggml_vk_buffer_copy_async(vk_context * ctx, vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
  1607. #ifdef GGML_VULKAN_DEBUG
  1608. std::cerr << "ggml_vk_buffer_copy_async(" << size << ")" << std::endl;
  1609. #endif
  1610. // Make sure both buffers are on same ctx
  1611. GGML_ASSERT(src->ctx == dst->ctx);
  1612. VkBufferCopy bc{ src_offset, dst_offset, size };
  1613. vkCmdCopyBuffer(ctx->s->buffer, src->buffer, dst->buffer, 1, &bc);
  1614. }
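// Synchronous buffer copy. Within a single device this records one copy and waits on
// the fence; across devices it bounces the data through both devices' sync staging
// buffers with a host memcpy in between.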
  1615. static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
  1616. if (src->ctx == dst->ctx) {
  1617. #ifdef GGML_VULKAN_DEBUG
  1618. std::cerr << "ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")" << std::endl;
  1619. #endif
  1620. // Copy within the device
  1621. ggml_backend_vk_context * ctx = src->ctx;
  1622. VkBufferCopy bc{ src_offset, dst_offset, size };
  1623. vk_context * subctx = ggml_vk_create_context(ctx, ctx->device.lock()->transfer_queue);
  1624. ggml_vk_ctx_begin(ctx, subctx);
  1625. ggml_vk_buffer_copy_async(subctx, dst, dst_offset, src, src_offset, size);
  1626. ggml_vk_ctx_end(subctx);
  1627. ggml_vk_submit(subctx, ctx->fence);
  1628. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "vk_buffer_copy waitForFences");
  1629. ctx->device.lock()->device.resetFences({ ctx->fence });
  1630. } else {
  1631. #ifdef GGML_VULKAN_DEBUG
  1632. std::cerr << "ggml_vk_buffer_copy(MULTI_DEVICE, " << size << ")" << std::endl;
  1633. #endif
  1634. // Copy device to device
  1635. ggml_backend_vk_context * src_ctx = src->ctx;
  1636. ggml_backend_vk_context * dst_ctx = dst->ctx;
  1637. ggml_vk_ensure_sync_staging_buffer(src_ctx, size);
  1638. ggml_vk_ensure_sync_staging_buffer(dst_ctx, size);
  1639. // Copy to src staging buffer
  1640. ggml_vk_buffer_copy(src_ctx->sync_staging, 0, src, src_offset, size);
  1641. // memcpy to dst staging buffer
  1642. memcpy(dst_ctx->sync_staging->ptr, src_ctx->sync_staging->ptr, size);
  1643. // Copy to dst buffer
  1644. ggml_vk_buffer_copy(dst, dst_offset, dst_ctx->sync_staging, 0, size);
  1645. }
  1646. }
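// Fills `size` bytes of dst at `offset` with the 32-bit pattern `c` via fillBuffer and waits on the fence.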
  1647. static void ggml_vk_buffer_memset(ggml_backend_vk_context * ctx, vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
  1648. #ifdef GGML_VULKAN_DEBUG
  1649. std::cerr << "ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")" << std::endl;
  1650. #endif
  1651. // Make sure ctx owns the buffer
  1652. GGML_ASSERT(dst->ctx == ctx);
  1653. vk_context * subctx = ggml_vk_create_context(ctx, ctx->device.lock()->transfer_queue);
  1654. ggml_vk_ctx_begin(ctx, subctx);
  1655. subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
  1656. ggml_vk_ctx_end(subctx);
  1657. ggml_vk_submit(subctx, ctx->fence);
  1658. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "vk_memset waitForFences");
  1659. ctx->device.lock()->device.resetFences({ ctx->fence });
  1660. }
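// Uploads the first i1 rows of slice (i3, i2) of a host tensor into dst. Fully contiguous rows take the
// plain write path, row-strided data the 2D path, and anything else falls back to the non-contiguous writer.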
  1661. static void ggml_vk_h2d_tensor_2d(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& dst, size_t offset, const ggml_tensor * src, uint64_t i3, uint64_t i2, uint64_t i1) {
  1662. #ifdef GGML_VULKAN_DEBUG
  1663. std::cerr << "ggml_vk_h2d_tensor_2d(dst=" << dst << ", offset=" << offset << ", src=" << src << ", i3=" << i3 << ", i2=" << i2 << ", i1=" << i1 << ")" << std::endl;
  1664. #endif
  1665. const uint64_t ne0 = src->ne[0];
  1666. const uint64_t ne1 = src->ne[1];
  1667. const uint64_t nb0 = src->nb[0];
  1668. const uint64_t nb1 = src->nb[1];
  1669. const uint64_t nb2 = src->nb[2];
  1670. const uint64_t nb3 = src->nb[3];
  1671. const enum ggml_type type = src->type;
  1672. const size_t ts = ggml_type_size(type);
  1673. const size_t bs = ggml_blck_size(type);
  1674. const size_t row_length = ts*ne0/bs;
  1675. const void * x = (const void *) ((const char *) src->data + i2*nb2 + i3*nb3);
  1676. if (nb0 == ts && nb1 == row_length) {
  1677. return ggml_vk_buffer_write_async(ctx, subctx, dst, offset, x, i1*nb1);
  1678. }
  1679. if (nb0 == ts && (i1 == ne1 || !ggml_is_permuted(src))) {
  1680. return ggml_vk_buffer_write_2d_async(ctx, subctx, dst, offset, x, nb1, row_length, i1);
  1681. }
  1682. GGML_ASSERT(i3 == 0);
  1683. GGML_ASSERT(i2 == 0);
  1684. GGML_ASSERT(i1 == (uint64_t) ggml_nrows(src));
  1685. return ggml_vk_buffer_write_nc_async(ctx, subctx, dst, offset, src);
  1686. }
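// Downloads a tensor from the device: contiguous tensors as one block, row-contiguous tensors row by row.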
  1687. static void ggml_vk_d2h_tensor_2d(ggml_backend_vk_context * ctx, vk_context * subctx, vk_buffer& src, size_t offset, const ggml_tensor * dst) {
  1688. #ifdef GGML_VULKAN_DEBUG
  1689. std::cerr << "ggml_vk_d2h_tensor_2d()" << std::endl;
  1690. #endif
  1691. const uint64_t ne0 = dst->ne[0];
  1692. const uint64_t ne1 = dst->ne[1];
  1693. const uint64_t ne2 = dst->ne[2];
  1694. const uint64_t ne3 = dst->ne[3];
  1695. const uint64_t nb0 = dst->nb[0];
  1696. const uint64_t nb1 = dst->nb[1];
  1697. // const uint64_t nb2 = dst->nb[2];
  1698. // const uint64_t nb3 = dst->nb[3];
  1699. const enum ggml_type type = dst->type;
  1700. const size_t ts = ggml_type_size(type);
  1701. const size_t bs = ggml_blck_size(type);
  1702. const size_t row_length = ts*ne0/bs;
  1703. if (ggml_is_contiguous(dst)) {
  1704. return ggml_vk_buffer_read_async(ctx, subctx, src, offset, dst->data, ne1*nb1*ne2*ne3);
  1705. }
  1706. if (nb0 == ts) {
  1707. return ggml_vk_buffer_read_2d_async(ctx, subctx, src, offset, dst->data, nb1, nb1, row_length, ne1*ne2*ne3);
  1708. }
  1709. GGML_ASSERT(false);
  1710. }
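// Heuristic: split the K dimension into 4 chunks when K is large but M or N is small, so that more
// workgroups can run concurrently (e.g. m=32, n=512, k=4096 -> 4; m=n=k=1024 -> 1).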
  1711. static uint32_t ggml_vk_guess_split_k(int m, int n, int k) {
  1712. #ifdef GGML_VULKAN_DEBUG
  1713. std::cerr << "ggml_vk_guess_split_k(" << m << ", " << n << ", " << k << ")";
  1714. #endif
  1715. if (k > 128 && (m < 128 || n < 128) && m > 2 && n > 2) {
  1716. #ifdef GGML_VULKAN_DEBUG
  1717. std::cerr << " = 4" << std::endl;
  1718. #endif
  1719. return 4;
  1720. }
  1721. #ifdef GGML_VULKAN_DEBUG
  1722. std::cerr << " = 1" << std::endl;
  1723. #endif
  1724. return 1;
  1725. }
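// Returns the K alignment of the matmul shader size class (S/M/L) that matches this m x n.
// Only the f32 aligned variants are queried here; the f16 variants presumably share the same alignment.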
  1726. static uint32_t ggml_vk_guess_matmul_pipeline_align(ggml_backend_vk_context * ctx, int m, int n) {
  1727. #ifdef GGML_VULKAN_DEBUG
  1728. std::cerr << "ggml_vk_guess_matmul_pipeline_align(" << m << ", " << n << ")" << std::endl;
  1729. #endif
  1730. if (m <= 32 || n <= 32) {
  1731. return ctx->pipeline_matmul_f32_aligned_s.align;
  1732. }
  1733. if (ctx->device.lock()->subgroup_size == 64 || m <= 64 || n <= 64) {
  1734. return ctx->pipeline_matmul_f32_aligned_m.align;
  1735. }
  1736. return ctx->pipeline_matmul_f32_aligned_l.align;
  1737. }
  1738. static vk_pipeline* ggml_vk_guess_matmul_pipeline_amd(ggml_backend_vk_context * ctx, bool bit16_x, bool bit16_y, int m, int n, bool aligned) {
  1739. if (bit16_x && bit16_y) {
  1740. if (m <= 32 || n <= 32) {
  1741. #ifdef GGML_VULKAN_DEBUG
  1742. std::cerr << " S" << std::endl;
  1743. #endif
  1744. return aligned ? &ctx->pipeline_matmul_f16_aligned_s : &ctx->pipeline_matmul_f16_s;
  1745. }
  1746. #ifdef GGML_VULKAN_DEBUG
  1747. std::cerr << " M" << std::endl;
  1748. #endif
  1749. return aligned ? &ctx->pipeline_matmul_f16_aligned_m : &ctx->pipeline_matmul_f16_m;
  1750. }
  1751. if (bit16_x && !bit16_y) {
  1752. if (m <= 32 || n <= 32) {
  1753. #ifdef GGML_VULKAN_DEBUG
  1754. std::cerr << " S" << std::endl;
  1755. #endif
  1756. return aligned ? &ctx->pipeline_matmul_f16_f32_aligned_s : &ctx->pipeline_matmul_f16_f32_s;
  1757. }
  1758. #ifdef GGML_VULKAN_DEBUG
  1759. std::cerr << " M" << std::endl;
  1760. #endif
  1761. return aligned ? &ctx->pipeline_matmul_f16_f32_aligned_m : &ctx->pipeline_matmul_f16_f32_m;
  1762. }
  1763. if (!bit16_x && bit16_y) {
  1764. GGML_ASSERT(false);
  1765. }
  1766. if (m <= 32 || n <= 32) {
  1767. #ifdef GGML_VULKAN_DEBUG
  1768. std::cerr << " S" << std::endl;
  1769. #endif
  1770. return aligned ? &ctx->pipeline_matmul_f32_aligned_s : &ctx->pipeline_matmul_f32_s;
  1771. }
  1772. #ifdef GGML_VULKAN_DEBUG
  1773. std::cerr << " M" << std::endl;
  1774. #endif
  1775. return aligned ? &ctx->pipeline_matmul_f32_aligned_m : &ctx->pipeline_matmul_f32_m;
  1776. }
  1777. static vk_pipeline* ggml_vk_guess_matmul_pipeline_apple(ggml_backend_vk_context * ctx, bool bit16_x, bool bit16_y, bool aligned) {
  1778. #ifdef GGML_VULKAN_DEBUG
  1779. std::cerr << " M" << std::endl;
  1780. #endif
  1781. if (bit16_x && bit16_y) {
  1782. return aligned ? &ctx->pipeline_matmul_f16_aligned_m : &ctx->pipeline_matmul_f16_m;
  1783. }
  1784. if (bit16_x && !bit16_y) {
  1785. return aligned ? &ctx->pipeline_matmul_f16_f32_aligned_m : &ctx->pipeline_matmul_f16_f32_m;
  1786. }
  1787. if (!bit16_x && bit16_y) {
  1788. GGML_ASSERT(false);
  1789. }
  1790. return aligned ? &ctx->pipeline_matmul_f32_aligned_m : &ctx->pipeline_matmul_f32_m;
  1791. }
  1792. static vk_pipeline* ggml_vk_guess_matmul_pipeline_intel(ggml_backend_vk_context * ctx, bool bit16_x, bool bit16_y, bool aligned) {
  1793. #ifdef GGML_VULKAN_DEBUG
  1794. std::cerr << " S" << std::endl;
  1795. #endif
  1796. if (bit16_x && bit16_y) {
  1797. return aligned ? &ctx->pipeline_matmul_f16_aligned_s : &ctx->pipeline_matmul_f16_s;
  1798. }
  1799. if (bit16_x && !bit16_y) {
  1800. return aligned ? &ctx->pipeline_matmul_f16_f32_aligned_s : &ctx->pipeline_matmul_f16_f32_s;
  1801. }
  1802. if (!bit16_x && bit16_y) {
  1803. GGML_ASSERT(false);
  1804. }
  1805. return aligned ? &ctx->pipeline_matmul_f32_aligned_s : &ctx->pipeline_matmul_f32_s;
  1806. }
  1807. static vk_pipeline* ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, bool bit16_x, bool bit16_y, int m, int n, bool aligned) {
  1808. #ifdef GGML_VULKAN_DEBUG
  1809. std::cerr << "ggml_vk_guess_matmul_pipeline(" << bit16_x << ", " << bit16_y << ", " << m << ", " << n << ", " << aligned << ")";
  1810. #endif
  1811. switch (ctx->device.lock()->vendor_id) {
  1812. case VK_VENDOR_ID_AMD:
  1813. return ggml_vk_guess_matmul_pipeline_amd(ctx, bit16_x, bit16_y, m, n, aligned);
  1814. case VK_VENDOR_ID_APPLE:
  1815. return ggml_vk_guess_matmul_pipeline_apple(ctx, bit16_x, bit16_y, aligned);
  1816. case VK_VENDOR_ID_INTEL:
  1817. return ggml_vk_guess_matmul_pipeline_intel(ctx, bit16_x, bit16_y, aligned);
  1818. }
  1819. if (bit16_x && bit16_y) {
  1820. if (m <= 32 || n <= 32) {
  1821. #ifdef GGML_VULKAN_DEBUG
  1822. std::cerr << " S" << std::endl;
  1823. #endif
  1824. return aligned ? &ctx->pipeline_matmul_f16_aligned_s : &ctx->pipeline_matmul_f16_s;
  1825. }
  1826. if (m <= 64 || n <= 64) {
  1827. #ifdef GGML_VULKAN_DEBUG
  1828. std::cerr << " M" << std::endl;
  1829. #endif
  1830. return aligned ? &ctx->pipeline_matmul_f16_aligned_m : &ctx->pipeline_matmul_f16_m;
  1831. }
  1832. #ifdef GGML_VULKAN_DEBUG
  1833. std::cerr << " L" << std::endl;
  1834. #endif
  1835. return aligned ? &ctx->pipeline_matmul_f16_aligned_l : &ctx->pipeline_matmul_f16_l;
  1836. }
  1837. if (bit16_x && !bit16_y) {
  1838. if (m <= 32 || n <= 32) {
  1839. #ifdef GGML_VULKAN_DEBUG
  1840. std::cerr << " S" << std::endl;
  1841. #endif
  1842. return aligned ? &ctx->pipeline_matmul_f16_f32_aligned_s : &ctx->pipeline_matmul_f16_f32_s;
  1843. }
  1844. if (m <= 64 || n <= 64) {
  1845. #ifdef GGML_VULKAN_DEBUG
  1846. std::cerr << " M" << std::endl;
  1847. #endif
  1848. return aligned ? &ctx->pipeline_matmul_f16_f32_aligned_m : &ctx->pipeline_matmul_f16_f32_m;
  1849. }
  1850. #ifdef GGML_VULKAN_DEBUG
  1851. std::cerr << " L" << std::endl;
  1852. #endif
  1853. return aligned ? &ctx->pipeline_matmul_f16_f32_aligned_l : &ctx->pipeline_matmul_f16_f32_l;
  1854. }
  1855. if (!bit16_x && bit16_y) {
  1856. GGML_ASSERT(false);
  1857. }
  1858. if (m <= 32 || n <= 32) {
  1859. #ifdef GGML_VULKAN_DEBUG
  1860. std::cerr << " S" << std::endl;
  1861. #endif
  1862. return aligned ? &ctx->pipeline_matmul_f32_aligned_s : &ctx->pipeline_matmul_f32_s;
  1863. }
  1864. if (m <= 64 || n <= 64) {
  1865. #ifdef GGML_VULKAN_DEBUG
  1866. std::cerr << " M" << std::endl;
  1867. #endif
  1868. return aligned ? &ctx->pipeline_matmul_f32_aligned_m : &ctx->pipeline_matmul_f32_m;
  1869. }
  1870. #ifdef GGML_VULKAN_DEBUG
  1871. std::cerr << " L" << std::endl;
  1872. #endif
  1873. return aligned ? &ctx->pipeline_matmul_f32_aligned_l : &ctx->pipeline_matmul_f32_l;
  1874. }
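// Dispatches a (possibly batched) matmul. With split_k == 1 a single dispatch writes directly to d;
// otherwise the first dispatch writes split_k partial sums per output element into split_k_buffer and
// pipeline_matmul_split_k_reduce accumulates them into d in a second pass.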
  1875. static void ggml_vk_matmul(ggml_backend_vk_context * ctx, vk_context * subctx, vk_pipeline& pipeline, vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& split_k_buffer, uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d, uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3, uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d) {
  1876. #ifdef GGML_VULKAN_DEBUG
  1877. std::cerr << "ggml_vk_matmul(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), c: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), split_k: (" << split_k_buffer.buffer->buffer << ", " << split_k_buffer.offset << ", " << split_k_buffer.size << "), m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", split_k: " << split_k << ", batch: " << batch << ", ne02: " << ne02 << ", ne12: " << ne12 << ", broadcast2: " << broadcast2 << ", broadcast3: " << broadcast3 << ", batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ")" << std::endl;
  1878. #endif
  1879. ggml_vk_sync_buffers(subctx);
  1880. if (split_k == 1) {
  1881. const std::array<uint32_t, 14> pc = { m, n, k, stride_a, stride_b, stride_d, k, ne02, ne12, broadcast2, broadcast3, batch_stride_a, batch_stride_b, batch_stride_d };
  1882. ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d }, pc.size() * sizeof(uint32_t), pc.data(), { m, n, batch });
  1883. return;
  1884. }
  1885. GGML_ASSERT(batch_stride_d == m * n);
  1886. const std::array<uint32_t, 14> pc1 = { m, n, k, stride_a, stride_b, stride_d, CEIL_DIV(k, split_k), ne02, ne12, broadcast2, broadcast3, batch_stride_a, batch_stride_b, batch_stride_d };
  1887. // Make sure enough workgroups get assigned for split k to work
  1888. ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, split_k_buffer }, pc1.size() * sizeof(uint32_t), pc1.data(), { (CEIL_DIV(m, pipeline.wg_denoms[0]) * pipeline.wg_denoms[0]) * split_k, n, batch });
  1889. ggml_vk_sync_buffers(subctx);
  1890. const std::array<uint32_t, 2> pc2 = { (uint32_t)(m * n * batch), split_k };
  1891. ggml_vk_dispatch_pipeline(ctx, subctx, ctx->pipeline_matmul_split_k_reduce, { split_k_buffer, d }, pc2.size() * sizeof(uint32_t), pc2.data(), { m * n * batch, 1, 1 });
  1892. }
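// True when dims 0 and 1 are packed (nb1 equals the row size); dim 2 may still carry padding,
// but dim 3 must follow dim 2 directly.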
  1893. static bool ggml_vk_dim01_contiguous(const ggml_tensor * tensor) {
  1894. return
  1895. tensor->nb[0] == ggml_type_size(tensor->type) &&
  1896. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
  1897. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  1898. }
  1899. static vk_pipeline * ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, ggml_type from, ggml_type to) {
  1900. if (from == GGML_TYPE_F32 && to == GGML_TYPE_F32) {
  1901. return &ctx->pipeline_cpy_f32_f32;
  1902. }
  1903. if (from == GGML_TYPE_F32 && to == GGML_TYPE_F16) {
  1904. return &ctx->pipeline_cpy_f32_f16;
  1905. }
  1906. if (from == GGML_TYPE_F16 && to == GGML_TYPE_F16) {
  1907. return &ctx->pipeline_cpy_f16_f16;
  1908. }
  1909. std::cerr << "Missing CPY op for types: " << ggml_type_name(from) << " " << ggml_type_name(to) << std::endl;
  1910. GGML_ASSERT(false);
  1911. }
  1912. static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context * subctx, vk_pipeline * pipeline, const ggml_tensor * tensor, vk_subbuffer&& in, vk_subbuffer&& out, ggml_type buffer_type, bool aligned=true) {
  1913. #ifdef GGML_VULKAN_DEBUG
  1914. std::cerr << "ggml_vk_cpy_to_contiguous((" << tensor << ", type=" << tensor->type << ", backend=" << tensor->backend << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << "), ";
  1915. std::cerr << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ")" << std::endl;
  1916. #endif
  1917. const int tensor_type_size = ggml_type_size(tensor->type);
  1918. const int dst_type_size = ggml_type_size(buffer_type);
  1919. const uint32_t ne = tensor->ne[0] * tensor->ne[1] * tensor->ne[2];
  1920. const uint32_t nb2 = aligned ? ggml_vk_align_size(dst_type_size * tensor->ne[0] * tensor->ne[1], ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size : tensor->ne[0] * tensor->ne[1];
  1921. const vk_op_cpy_push_constants pc = {
  1922. (uint32_t)ne,
  1923. (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->nb[0] / tensor_type_size, (uint32_t)tensor->nb[1] / tensor_type_size, (uint32_t)tensor->nb[2] / tensor_type_size,
  1924. (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], 1 , (uint32_t)tensor->ne[0] , nb2,
  1925. 0,
  1926. };
  1927. ggml_vk_sync_buffers(subctx);
  1928. ggml_vk_dispatch_pipeline(ctx, subctx, *pipeline, { in, out }, sizeof(vk_op_cpy_push_constants), &pc, { ne, 1, 1 });
  1929. }
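// General matmul path: optionally uploads src0/src1, dequantizes or copies them into contiguous f16
// (f32 for src1 when the f16_f32 kernel is used), then runs the batched matmul chosen above.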
  1930. static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  1931. #ifdef GGML_VULKAN_DEBUG
  1932. std::cerr << "ggml_vk_mul_mat_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", backend=" << src0->backend << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  1933. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", backend=" << src1->backend << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  1934. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", backend=" << dst->backend << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)" << std::endl;
  1935. #endif
  1936. GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
  1937. GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
  1938. const uint64_t ne00 = src0->ne[0];
  1939. const uint64_t ne01 = src0->ne[1];
  1940. const uint64_t ne02 = src0->ne[2];
  1941. const uint64_t ne03 = src0->ne[3];
  1942. const uint64_t ne10 = src1->ne[0];
  1943. const uint64_t ne11 = src1->ne[1];
  1944. const uint64_t ne12 = src1->ne[2];
  1945. const uint64_t ne13 = src1->ne[3];
  1946. const uint64_t ne20 = dst->ne[0];
  1947. const uint64_t ne21 = dst->ne[1];
  1948. const uint64_t r2 = ne12 / ne02;
  1949. const uint64_t r3 = ne13 / ne03;
  1950. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
  1951. ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
  1952. ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
  1953. vk_buffer d_Qx;
  1954. size_t qx_buf_offset = 0;
  1955. vk_buffer d_Qy;
  1956. size_t qy_buf_offset = 0;
  1957. bool src0_uma = false;
  1958. bool src1_uma = false;
  1959. if (ctx->device.lock()->uma) {
  1960. ggml_vk_host_get(ctx, src0->data, d_Qx, qx_buf_offset);
  1961. ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
  1962. src0_uma = d_Qx != nullptr;
  1963. src1_uma = d_Qy != nullptr;
  1964. }
  1965. const bool load_x = src0->backend != GGML_BACKEND_TYPE_GPU && !src0_uma;
  1966. const bool load_y = src1->backend != GGML_BACKEND_TYPE_GPU && !src1_uma;
  1967. const bool x_non_contig = !load_x && !ggml_vk_dim01_contiguous(src0);
  1968. const bool y_non_contig = !load_y && !ggml_vk_dim01_contiguous(src1);
  1969. const bool f16_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
  1970. const bool qx_needs_dequant = src0->type != GGML_TYPE_F16 || x_non_contig;
  1971. const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;
  1972. // Not implemented
  1973. GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT
  1974. const int x_ne = ne01 * ne00;
  1975. const int y_ne = ne11 * ne10;
  1976. const int d_ne = ne11 * ne01;
  1977. const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, ne01, ne11));
  1978. const bool aligned = ne10 == kpad;
  1979. const uint32_t split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);
  1980. vk_pipeline * pipeline = ggml_vk_guess_matmul_pipeline(ctx, true, !f16_f32_kernel, ne01, ne11, aligned);
  1981. const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
  1982. const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
  1983. const uint64_t x_sz = sizeof(ggml_fp16_t) * x_ne;
  1984. const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
  1985. const uint64_t d_sz = sizeof(float) * d_ne;
  1986. vk_buffer d_D = extra->buffer_gpu.lock();
  1987. const uint64_t d_buf_offset = extra->offset;
  1988. GGML_ASSERT(d_D != nullptr);
  1989. GGML_ASSERT(d_D->size >= d_buf_offset + d_sz * ne02 * ne03);
  1990. vk_buffer d_X;
  1991. uint64_t x_buf_offset = 0;
  1992. vk_buffer d_Y;
  1993. uint64_t y_buf_offset = 0;
  1994. if (load_x) {
  1995. d_Qx = ctx->prealloc_qx;
  1996. } else if (!src0_uma) {
  1997. d_Qx = extra_src0->buffer_gpu.lock();
  1998. qx_buf_offset = extra_src0->offset;
  1999. GGML_ASSERT(d_Qx != nullptr);
  2000. }
  2001. if (load_y) {
  2002. d_Qy = ctx->prealloc_qy;
  2003. } else if (!src1_uma) {
  2004. d_Qy = extra_src1->buffer_gpu.lock();
  2005. qy_buf_offset = extra_src1->offset;
  2006. GGML_ASSERT(d_Qy != nullptr);
  2007. }
  2008. if (qx_needs_dequant) {
  2009. d_X = ctx->prealloc_x;
  2010. GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
  2011. } else {
  2012. d_X = d_Qx;
  2013. x_buf_offset = qx_buf_offset;
  2014. GGML_ASSERT(qx_sz == x_sz); // NOLINT
  2015. }
  2016. if (qy_needs_dequant) {
  2017. d_Y = ctx->prealloc_y;
  2018. GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
  2019. } else {
  2020. d_Y = d_Qy;
  2021. y_buf_offset = qy_buf_offset;
  2022. GGML_ASSERT(qy_sz == y_sz);
  2023. }
  2024. vk_pipeline * to_fp16_vk_0 = nullptr;
  2025. vk_pipeline * to_fp16_vk_1 = nullptr;
  2026. if (x_non_contig) {
  2027. to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, GGML_TYPE_F16);
  2028. } else {
  2029. to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
  2030. }
  2031. if (y_non_contig) {
  2032. to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, GGML_TYPE_F16);
  2033. } else {
  2034. to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
  2035. }
  2036. GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
  2037. GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
  2038. // Allocate descriptor sets
  2039. ggml_pipeline_allocate_descriptor_sets(ctx, *pipeline, ne12 * ne13);
  2040. if (qx_needs_dequant) {
  2041. ggml_pipeline_allocate_descriptor_sets(ctx, *to_fp16_vk_0, x_non_contig ? 1 : ne12 * ne13);
  2042. }
  2043. if (qy_needs_dequant) {
  2044. ggml_pipeline_allocate_descriptor_sets(ctx, *to_fp16_vk_1, y_non_contig ? 1 : ne12 * ne13);
  2045. }
  2046. if (split_k > 1) {
  2047. ggml_pipeline_allocate_descriptor_sets(ctx, ctx->pipeline_matmul_split_k_reduce, ne12 * ne13);
  2048. }
  2049. if (x_non_contig) {
  2050. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE }, dst->type, false);
  2051. } else if (load_x || qx_needs_dequant) {
  2052. if (load_x) {
  2053. // copy data to device
  2054. ggml_vk_h2d_tensor_2d(ctx, subctx, d_Qx, 0, src0, 0, 0, ggml_nrows(src0));
  2055. ctx->staging_offset = qx_sz * ne02 * ne03;
  2056. }
  2057. if (qx_needs_dequant) {
  2058. const std::vector<int> pc = { (int)ne01, (int)ne10, (int)ne10, (int)ne10 };
  2059. ggml_vk_sync_buffers(subctx);
  2060. ggml_vk_dispatch_pipeline(ctx, subctx, *to_fp16_vk_0, { { d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, { d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(int), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
  2061. }
  2062. }
  2063. if (y_non_contig) {
  2064. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE }, dst->type);
  2065. } else if (load_y) {
  2066. ggml_vk_h2d_tensor_2d(ctx, subctx, d_Qy, 0, src1, 0, 0, ggml_nrows(src1));
  2067. }
  2068. uint32_t stride_batch_x = ne00*ne01;
  2069. uint32_t stride_batch_y = ne10*ne11;
  2070. if (!ggml_vk_dim01_contiguous(src0) && !load_x && !qx_needs_dequant) {
  2071. stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
  2072. }
  2073. if (!ggml_vk_dim01_contiguous(src1) && !load_y && !qy_needs_dequant) {
  2074. stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
  2075. }
  2076. // compute
  2077. ggml_vk_matmul(ctx, subctx, *pipeline, { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 }, { d_D, d_buf_offset, d_sz * ne12 * ne13 }, { ctx->prealloc_split_k, 0, d_sz * ne12 * ne13 * split_k }, ne01, ne11, ne10, ne10, ne10, ne01, split_k, ne12*ne13, ne02, ne12, r2, r3, stride_batch_x, stride_batch_y, ne20*ne21); // NOLINT
  2078. if (dst->backend == GGML_BACKEND_TYPE_CPU) {
  2079. // copy dst to host
  2080. float * d = (float *) ((char *) dst->data);
  2081. ggml_vk_buffer_read_async(ctx, subctx, d_D, 0, d, sizeof(float) * d_ne * ne12 * ne13);
  2082. }
  2083. }
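// Matrix-vector path (ne11 == 1): uses the dequantize_mul_mat_vec shaders and loops over the batch
// dimensions, issuing one dispatch per (i12, i13) pair.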
  2084. static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2085. #ifdef GGML_VULKAN_DEBUG
  2086. std::cerr << "ggml_vk_mul_mat_vec_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", backend=" << src0->backend << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  2087. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", backend=" << src1->backend << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  2088. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", backend=" << dst->backend << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)" << std::endl;
  2089. #endif
  2090. GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
  2091. GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
  2092. const uint64_t ne00 = src0->ne[0];
  2093. const uint64_t ne01 = src0->ne[1];
  2094. const uint64_t ne02 = src0->ne[2];
  2095. const uint64_t ne03 = src0->ne[3];
  2096. const uint64_t ne10 = src1->ne[0];
  2097. const uint64_t ne11 = src1->ne[1];
  2098. const uint64_t ne12 = src1->ne[2];
  2099. const uint64_t ne13 = src1->ne[3];
  2100. GGML_ASSERT(ne11 == 1);
  2101. const uint64_t nb2 = dst->nb[2];
  2102. const uint64_t nb3 = dst->nb[3];
  2103. const uint64_t r2 = ne12 / ne02;
  2104. const uint64_t r3 = ne13 / ne03;
  2105. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
  2106. ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
  2107. ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
  2108. vk_buffer d_Qx;
  2109. size_t qx_buf_offset = 0;
  2110. vk_buffer d_Qy;
  2111. size_t qy_buf_offset = 0;
  2112. bool src0_uma = false;
  2113. bool src1_uma = false;
  2114. if (ctx->device.lock()->uma) {
  2115. ggml_vk_host_get(ctx, src0->data, d_Qx, qx_buf_offset);
  2116. ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
  2117. src0_uma = d_Qx != nullptr;
  2118. src1_uma = d_Qy != nullptr;
  2119. }
  2120. const bool load_x = src0->backend != GGML_BACKEND_TYPE_GPU && !src0_uma;
  2121. const bool load_y = src1->backend != GGML_BACKEND_TYPE_GPU && !src1_uma;
  2122. const bool x_non_contig = !load_x && !ggml_vk_dim01_contiguous(src0);
  2123. const bool y_non_contig = !load_y && !ggml_vk_dim01_contiguous(src1);
  2124. const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;
  2125. const bool qx_needs_dequant = x_non_contig;
  2126. const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;
  2127. const uint64_t x_ne = ne01 * ne00;
  2128. const uint64_t y_ne = ne11 * ne10;
  2129. const uint64_t d_ne = ne11 * ne01;
  2130. const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment);
  2131. const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
  2132. const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
  2133. const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
  2134. const uint64_t d_sz = sizeof(float) * d_ne;
  2135. vk_buffer d_D = extra->buffer_gpu.lock();
  2136. const uint64_t d_buf_offset = extra->offset;
  2137. GGML_ASSERT(d_D != nullptr);
  2138. vk_buffer d_X;
  2139. uint64_t x_buf_offset = 0;
  2140. vk_buffer d_Y;
  2141. uint64_t y_buf_offset = 0;
  2142. if (load_x) {
  2143. d_Qx = ctx->prealloc_qx;
2144. } else if (!src0_uma) {
  2145. d_Qx = extra_src0->buffer_gpu.lock();
  2146. qx_buf_offset = extra_src0->offset;
  2147. GGML_ASSERT(d_Qx != nullptr);
  2148. }
  2149. if (load_y) {
  2150. d_Qy = ctx->prealloc_qy;
  2151. } else if(!src1_uma) {
  2152. d_Qy = extra_src1->buffer_gpu.lock();
  2153. qy_buf_offset = extra_src1->offset;
  2154. GGML_ASSERT(d_Qy != nullptr);
  2155. }
  2156. if (qx_needs_dequant) {
  2157. d_X = ctx->prealloc_x;
  2158. } else {
  2159. d_X = d_Qx;
  2160. x_buf_offset = qx_buf_offset;
  2161. GGML_ASSERT(qx_sz == x_sz);
  2162. }
  2163. if (qy_needs_dequant) {
  2164. d_Y = ctx->prealloc_y;
  2165. } else {
  2166. d_Y = d_Qy;
  2167. y_buf_offset = qy_buf_offset;
  2168. GGML_ASSERT(qy_sz == y_sz);
  2169. }
  2170. vk_pipeline * to_fp16_vk_0 = nullptr;
  2171. vk_pipeline* to_fp16_vk_1 = nullptr;
  2172. if (x_non_contig) {
  2173. to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, src0->type);
  2174. }
  2175. if (y_non_contig) {
  2176. to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, src1->type);
  2177. } else {
  2178. to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
  2179. }
  2180. vk_pipeline* dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type);
  2181. GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
  2182. GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
  2183. GGML_ASSERT(dmmv != nullptr);
  2184. // Allocate descriptor sets
  2185. if (qx_needs_dequant) {
  2186. ggml_pipeline_allocate_descriptor_sets(ctx, *to_fp16_vk_0, 1);
  2187. }
  2188. if (qy_needs_dequant) {
  2189. ggml_pipeline_allocate_descriptor_sets(ctx, *to_fp16_vk_1, y_non_contig ? 1 : ne12 * ne13);
  2190. }
  2191. ggml_pipeline_allocate_descriptor_sets(ctx, *dmmv, ne12 * ne13);
  2192. if (x_non_contig) {
  2193. GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment));
  2194. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE }, src0->type);
  2195. } else if (load_x) {
  2196. // copy data to device
  2197. ggml_vk_h2d_tensor_2d(ctx, subctx, d_Qx, 0, src0, 0, 0, ggml_nrows(src0));
  2198. }
  2199. if (y_non_contig) {
  2200. GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
  2201. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE }, src1->type);
  2202. } else if (load_y) {
  2203. ggml_vk_h2d_tensor_2d(ctx, subctx, d_Qy, 0, src1, 0, 0, ggml_nrows(src1));
  2204. }
  2205. for (uint64_t i13 = 0; i13 < ne13; i13++) {
  2206. const uint64_t i03 = i13 / r3;
  2207. for (uint64_t i12 = 0; i12 < ne12; i12++) {
  2208. const uint64_t i02 = i12 / r2;
  2209. const uint64_t it_idx0 = (i03 * ne02 + i02);
  2210. const uint64_t it_idx1 = (i13 * ne12 + i12);
  2211. const uint64_t x_offset = x_buf_offset + x_sz * it_idx0;
  2212. const uint64_t qy_offset = qy_buf_offset + qy_sz * it_idx1;
  2213. const uint64_t y_offset = y_buf_offset + y_sz * it_idx1;
  2214. const uint64_t d_offset = d_buf_offset + d_sz * it_idx1;
  2215. const uint64_t y_buffer_offset = (y_offset / ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment;
  2216. const uint64_t y_shader_offset = y_offset - y_buffer_offset;
  2217. const uint64_t d_buffer_offset = (d_offset / ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment;
  2218. const uint64_t d_shader_offset = d_offset - d_buffer_offset;
  2219. if (!y_non_contig && qy_needs_dequant) {
  2220. const std::vector<int> pc = { (int)ne11, (int)ne10, (int)ne10, (int)ne10 };
  2221. ggml_vk_sync_buffers(subctx);
  2222. ggml_vk_dispatch_pipeline(ctx, subctx, *to_fp16_vk_1, { { d_Qy, qy_offset, qy_sz }, { d_Y, y_offset, y_sz } }, pc.size() * sizeof(int), pc.data(), { (uint32_t)y_ne, 1, 1});
  2223. }
  2224. // compute
  2225. const std::array<int, 3> pc = { (int)ne00, (int)(y_shader_offset / ggml_type_size(src1->type)), (int)(d_shader_offset / ggml_type_size(dst->type))};
  2226. ggml_vk_sync_buffers(subctx);
  2227. ggml_vk_dispatch_pipeline(ctx, subctx, *dmmv, { { d_X, x_offset, x_sz }, { d_Y, y_buffer_offset, y_sz + y_shader_offset }, { d_D, d_buffer_offset, d_sz + d_shader_offset } }, 3 * sizeof(int), &pc, { (uint32_t)ne01, 1, 1});
  2228. if (dst->backend == GGML_BACKEND_TYPE_CPU) {
  2229. // copy dst to host
  2230. float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
  2231. ggml_vk_sync_buffers(subctx);
  2232. ggml_vk_buffer_read_async(ctx, subctx, d_D, d_offset, d, sizeof(float) * d_ne);
  2233. }
  2234. }
  2235. }
  2236. }
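// Specialized matrix-vector kernel for the case where both f16 src0 and f32 src1 are permuted
// (the "p021" layout).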
  2237. static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2238. #ifdef GGML_VULKAN_DEBUG
2239. std::cerr << "ggml_vk_mul_mat_vec_p021_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", backend=" << src0->backend << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  2240. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", backend=" << src1->backend << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  2241. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", backend=" << dst->backend << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)" << std::endl;
  2242. #endif
  2243. GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
  2244. GGML_ASSERT(src0->backend == GGML_BACKEND_TYPE_GPU);
  2245. GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // NOLINT
  2246. GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // NOLINT
  2247. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  2248. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  2249. const uint64_t ne00 = src0->ne[0];
  2250. const uint64_t ne01 = src0->ne[1];
  2251. const uint64_t ne02 = src0->ne[2];
  2252. // const uint64_t ne03 = src0->ne[3];
  2253. const uint64_t ne10 = src1->ne[0];
  2254. const uint64_t ne11 = src1->ne[1];
  2255. const uint64_t ne12 = src1->ne[2];
  2256. // const uint64_t ne13 = src1->ne[3];
  2257. GGML_ASSERT(ne11 == 1);
  2258. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
  2259. ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
  2260. ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
  2261. vk_buffer d_Qy;
  2262. size_t qy_buf_offset = 0;
  2263. bool src1_uma = false;
  2264. if (ctx->device.lock()->uma) {
  2265. ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
  2266. src1_uma = d_Qy != nullptr;
  2267. }
  2268. const bool load_y = src1->backend != GGML_BACKEND_TYPE_GPU && !src1_uma;
  2269. const uint64_t x_ne = ne00 * ne01 * ne02;
  2270. const uint64_t y_ne = ne10 * ne11 * ne12;
  2271. const uint64_t d_ne = ne01 * ne11 * ne12;
  2272. const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment);
  2273. const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
  2274. const uint64_t d_sz = sizeof(float) * d_ne;
  2275. vk_buffer d_D = extra->buffer_gpu.lock();
  2276. const uint64_t d_buf_offset = extra->offset;
  2277. GGML_ASSERT(d_D != nullptr);
  2278. vk_buffer d_Qx = extra_src0->buffer_gpu.lock();
  2279. const uint64_t qx_buf_offset = extra_src0->offset;
  2280. GGML_ASSERT(d_Qx != nullptr);
  2281. if (load_y) {
  2282. d_Qy = ctx->prealloc_qy;
  2283. } else if (!src1_uma) {
  2284. d_Qy = extra_src1->buffer_gpu.lock();
  2285. qy_buf_offset = extra_src1->offset;
2286. GGML_ASSERT(d_Qy != nullptr);
  2287. }
  2288. // Allocate descriptor sets
  2289. ggml_pipeline_allocate_descriptor_sets(ctx, ctx->pipeline_mul_mat_vec_p021_f16_f32, 1);
  2290. const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment;
  2291. const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;
  2292. const uint64_t d_buffer_offset = (d_buf_offset / ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment;
  2293. const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;
  2294. if (load_y) {
  2295. ggml_vk_h2d_tensor_2d(ctx, subctx, d_Qy, qy_buf_offset, src1, 0, 0, ggml_nrows(src1));
  2296. }
  2297. // compute
  2298. const std::array<uint32_t, 6> pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
  2299. ggml_vk_sync_buffers(subctx);
  2300. ggml_vk_dispatch_pipeline(ctx, subctx, ctx->pipeline_mul_mat_vec_p021_f16_f32, { { d_Qx, qx_buf_offset, qx_sz }, { d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, { d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
  2301. if (dst->backend == GGML_BACKEND_TYPE_CPU) {
  2302. // copy dst to host
  2303. float * d = (float *) dst->data;
  2304. ggml_vk_sync_buffers(subctx);
  2305. ggml_vk_buffer_read_async(ctx, subctx, d_D, d_buf_offset, d, sizeof(float) * d_ne);
  2306. }
  2307. }
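// Matrix-vector fallback for non-contiguous (but unpermuted) f16 src0: row and channel strides are
// passed to the shader instead of first copying the data into a contiguous buffer.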
  2308. static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2309. #ifdef GGML_VULKAN_DEBUG
2310. std::cerr << "ggml_vk_mul_mat_vec_nc_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", backend=" << src0->backend << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  2311. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", backend=" << src1->backend << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  2312. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", backend=" << dst->backend << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)" << std::endl;
  2313. #endif
  2314. GGML_ASSERT(!ggml_is_transposed(src0));
  2315. GGML_ASSERT(!ggml_is_transposed(src1));
  2316. GGML_ASSERT(!ggml_is_permuted(src0));
  2317. GGML_ASSERT(src0->backend == GGML_BACKEND_TYPE_GPU);
  2318. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  2319. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  2320. const uint64_t ne00 = src0->ne[0];
  2321. const uint64_t ne01 = src0->ne[1];
  2322. const uint64_t ne02 = src0->ne[2];
  2323. // const uint64_t ne03 = src0->ne[3];
  2324. const uint64_t nb01 = src0->nb[1];
  2325. const uint64_t nb02 = src0->nb[2];
  2326. // const uint64_t ne10 = src1->ne[0];
  2327. const uint64_t ne11 = src1->ne[1];
  2328. const uint64_t ne12 = src1->ne[2];
  2329. // const uint64_t ne13 = src1->ne[3];
  2330. GGML_ASSERT(ne11 == 1);
  2331. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
  2332. ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
  2333. ggml_tensor_extra_gpu * extra_src1 = (ggml_tensor_extra_gpu *) src1->extra;
  2334. vk_buffer d_Qy = nullptr;
  2335. size_t qy_buf_offset = 0;
  2336. bool src1_uma = false;
  2337. if (ctx->device.lock()->uma) {
  2338. ggml_vk_host_get(ctx, src1->data, d_Qy, qy_buf_offset);
  2339. src1_uma = d_Qy != nullptr;
  2340. }
  2341. const bool load_y = src1->backend != GGML_BACKEND_TYPE_GPU && !src1_uma;
  2342. const uint64_t d_ne = ne01 * ne11 * ne12;
  2343. const uint32_t row_stride_x = nb01 / sizeof(ggml_fp16_t);
  2344. const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);
  2345. const uint64_t qx_sz = ggml_nbytes(src0);
  2346. const uint64_t qy_sz = ggml_nbytes(src1);
  2347. const uint64_t d_sz = sizeof(float) * d_ne;
  2348. vk_buffer d_D = extra->buffer_gpu.lock();
  2349. const uint64_t d_buf_offset = extra->offset;
  2350. GGML_ASSERT(d_D != nullptr);
  2351. vk_buffer d_Qx = extra_src0->buffer_gpu.lock();
  2352. const uint64_t qx_buf_offset = extra_src0->offset;
  2353. GGML_ASSERT(d_Qx != nullptr);
  2354. if (load_y) {
  2355. d_Qy = ctx->prealloc_qy;
  2356. } else {
  2357. d_Qy = extra_src1->buffer_gpu.lock();
  2358. qy_buf_offset = extra_src1->offset;
2359. GGML_ASSERT(d_Qy != nullptr);
  2360. }
  2361. // Allocate descriptor sets
  2362. ggml_pipeline_allocate_descriptor_sets(ctx, ctx->pipeline_mul_mat_vec_nc_f16_f32, 1);
  2363. const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment;
  2364. const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;
  2365. const uint64_t d_buffer_offset = (d_buf_offset / ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment;
  2366. const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;
  2367. if (load_y) {
  2368. ggml_vk_h2d_tensor_2d(ctx, subctx, d_Qy, qy_buf_offset, src1, 0, 0, ggml_nrows(src1));
  2369. }
  2370. // compute
  2371. const std::array<uint32_t, 7> pc = { (uint32_t)ne00, (uint32_t)ne01, row_stride_x, channel_stride_x, (uint32_t)(ne12 / ne02), (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
  2372. ggml_vk_sync_buffers(subctx);
  2373. ggml_vk_dispatch_pipeline(ctx, subctx, ctx->pipeline_mul_mat_vec_nc_f16_f32, { { d_Qx, qx_buf_offset, qx_sz }, { d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, { d_D, d_buffer_offset, d_sz + d_shader_offset } }, 7 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
  2374. if (dst->backend == GGML_BACKEND_TYPE_CPU) {
  2375. // copy dst to host
  2376. float * d = (float *) dst->data;
  2377. ggml_vk_sync_buffers(subctx);
  2378. ggml_vk_buffer_read_async(ctx, subctx, d_D, d_buf_offset, d, sizeof(float) * d_ne);
  2379. }
  2380. }
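// Quick eligibility check used when deciding whether a MUL_MAT node can run on the Vulkan backend.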
  2381. static bool ggml_vk_can_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * dst) {
  2382. const uint64_t ne10 = src1->ne[0];
  2383. const uint64_t ne0 = dst->ne[0];
  2384. const uint64_t ne1 = dst->ne[1];
  2385. // TODO: find the optimal values for these
  2386. return (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
  2387. (src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16 || ggml_is_quantized(src1->type)) &&
  2388. dst->type == GGML_TYPE_F32 &&
  2389. ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32) || src0->backend == GGML_BACKEND_TYPE_GPU);
  2390. }
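// Picks between the specialized mat-vec kernels and the general matmul, based on src0 type and
// layout and on whether src1 is a single column.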
  2391. static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context * subctx, const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
  2392. #ifdef GGML_VULKAN_DEBUG
  2393. std::cerr << "ggml_vk_mul_mat(" << src0 << ", " << src1 << ", " << dst << ")" << std::endl;
  2394. #endif
  2395. if (src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
  2396. ggml_vk_mul_mat_vec_p021_f16_f32(ctx, subctx, src0, src1, dst);
  2397. } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
  2398. ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, src0, src1, dst);
  2399. } else if (src1->ne[1] == 1 && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
  2400. ggml_vk_mul_mat_vec_q_f16(ctx, subctx, src0, src1, dst);
  2401. } else {
  2402. ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst);
  2403. }
  2404. }
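// REPEAT is implemented as a list of buffer-to-buffer copies, one source row per repetition,
// recorded into a single copyBuffer call.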
  2405. static void ggml_vk_op_repeat(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2406. // guaranteed to be an integer due to the check in ggml_can_repeat
  2407. const uint64_t ne0 = dst->ne[0];
  2408. const uint64_t ne1 = dst->ne[1];
  2409. const uint64_t ne2 = dst->ne[2];
  2410. const uint64_t ne3 = dst->ne[3];
  2411. const uint64_t ne00 = src0->ne[0];
  2412. const uint64_t ne01 = src0->ne[1];
  2413. const uint64_t ne02 = src0->ne[2];
  2414. const uint64_t ne03 = src0->ne[3];
  2415. const uint64_t nb0 = dst->nb[0];
  2416. const uint64_t nb1 = dst->nb[1];
  2417. const uint64_t nb2 = dst->nb[2];
  2418. const uint64_t nb3 = dst->nb[3];
  2419. const uint64_t nb00 = src0->nb[0];
  2420. const uint64_t nb01 = src0->nb[1];
  2421. const uint64_t nb02 = src0->nb[2];
  2422. const uint64_t nb03 = src0->nb[3];
  2423. const uint64_t nr0 = ne0/ne00;
  2424. const uint64_t nr1 = ne1/ne01;
  2425. const uint64_t nr2 = ne2/ne02;
  2426. const uint64_t nr3 = ne3/ne03;
  2427. // TODO: support for transposed / permuted tensors
  2428. GGML_ASSERT(nb0 == sizeof(float));
  2429. GGML_ASSERT(nb00 == sizeof(float));
  2430. GGML_ASSERT(src0->backend == GGML_BACKEND_TYPE_GPU);
  2431. GGML_ASSERT(dst->backend == GGML_BACKEND_TYPE_GPU);
  2432. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
  2433. ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
  2434. const vk_buffer src_buf = extra_src0->buffer_gpu.lock();
  2435. const uint64_t src_offset = extra_src0->offset;
  2436. vk_buffer dst_buf = extra->buffer_gpu.lock();
  2437. const uint64_t dst_offset = extra->offset;
  2438. std::vector<vk::BufferCopy> copies;
  2439. for (uint64_t i3 = 0; i3 < nr3; i3++) {
  2440. for (uint64_t k3 = 0; k3 < ne03; k3++) {
  2441. for (uint64_t i2 = 0; i2 < nr2; i2++) {
  2442. for (uint64_t k2 = 0; k2 < ne02; k2++) {
  2443. for (uint64_t i1 = 0; i1 < nr1; i1++) {
  2444. for (uint64_t k1 = 0; k1 < ne01; k1++) {
  2445. for (uint64_t i0 = 0; i0 < nr0; i0++) {
  2446. copies.push_back({
  2447. src_offset + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0,
  2448. dst_offset + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01,
  2449. ne00*nb0,
  2450. });
  2451. }
  2452. }
  2453. }
  2454. }
  2455. }
  2456. }
  2457. }
  2458. ggml_vk_sync_buffers(subctx);
  2459. subctx->s->buffer.copyBuffer(src_buf->buffer, dst_buf->buffer, copies);
  2460. GGML_UNUSED(ctx);
  2461. GGML_UNUSED(src1);
  2462. }
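// Maps a ggml op (plus operand types) to the matching compute pipeline, or nullptr if unsupported.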
  2463. static vk_pipeline* ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, ggml_op op) {
  2464. switch (op) {
  2465. case GGML_OP_ADD:
  2466. if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2467. return &ctx->pipeline_add_f32;
  2468. }
  2469. return nullptr;
  2470. case GGML_OP_GET_ROWS:
  2471. GGML_ASSERT(src1->type == GGML_TYPE_I32);
  2472. if (dst->type == GGML_TYPE_F16) {
  2473. return &ctx->pipeline_get_rows[src0->type];
  2474. }
  2475. if (dst->type == GGML_TYPE_F32) {
  2476. return &ctx->pipeline_get_rows_f32[src0->type];
  2477. }
  2478. return nullptr;
  2479. case GGML_OP_MUL:
  2480. if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2481. return &ctx->pipeline_mul_f32;
  2482. }
  2483. return nullptr;
  2484. case GGML_OP_SCALE:
  2485. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2486. return &ctx->pipeline_scale_f32;
  2487. }
  2488. return nullptr;
  2489. case GGML_OP_SQR:
  2490. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2491. return &ctx->pipeline_sqr_f32;
  2492. }
  2493. return nullptr;
  2494. case GGML_OP_CLAMP:
  2495. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2496. return &ctx->pipeline_clamp_f32;
  2497. }
  2498. return nullptr;
  2499. case GGML_OP_CPY:
  2500. case GGML_OP_CONT:
  2501. case GGML_OP_DUP:
  2502. return ggml_vk_get_cpy_pipeline(ctx, src0->type, dst->type);
  2503. case GGML_OP_NORM:
  2504. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2505. return &ctx->pipeline_norm_f32;
  2506. }
  2507. return nullptr;
  2508. case GGML_OP_RMS_NORM:
  2509. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2510. return &ctx->pipeline_rms_norm_f32;
  2511. }
  2512. return nullptr;
  2513. case GGML_OP_UNARY:
  2514. switch (ggml_get_unary_op(dst)) {
  2515. case GGML_UNARY_OP_SILU:
  2516. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2517. return &ctx->pipeline_silu_f32;
  2518. }
  2519. break;
  2520. case GGML_UNARY_OP_GELU:
  2521. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2522. return &ctx->pipeline_gelu_f32;
  2523. }
  2524. break;
  2525. case GGML_UNARY_OP_RELU:
  2526. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2527. return &ctx->pipeline_relu_f32;
  2528. }
  2529. break;
  2530. default:
  2531. break;
  2532. }
  2533. return nullptr;
  2534. case GGML_OP_DIAG_MASK_INF:
  2535. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2536. return &ctx->pipeline_diag_mask_inf_f32;
  2537. }
  2538. return nullptr;
  2539. case GGML_OP_SOFT_MAX:
  2540. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2541. return &ctx->pipeline_soft_max_f32;
  2542. }
  2543. return nullptr;
  2544. case GGML_OP_ROPE:
  2545. {
  2546. const int mode = ((const int32_t *) dst->op_params)[2];
  2547. const bool is_neox = mode & 2;
  2548. const bool is_glm = mode & 4;
  2549. if (is_glm) {
  2550. return nullptr;
  2551. }
  2552. if (is_neox) {
  2553. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2554. return &ctx->pipeline_rope_neox_f32;
  2555. }
  2556. if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
  2557. return &ctx->pipeline_rope_neox_f16;
  2558. }
  2559. } else {
  2560. if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
  2561. return &ctx->pipeline_rope_f32;
  2562. }
  2563. if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
  2564. return &ctx->pipeline_rope_f16;
  2565. }
  2566. }
  2567. return nullptr;
  2568. }
  2569. default:
  2570. return nullptr;
  2571. }
  2572. }
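// Ops without a dedicated pipeline (currently only REPEAT) are handled by a host-side function instead.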
  2573. static ggml_vk_func_t ggml_vk_op_get_func(ggml_op op) {
  2574. switch(op) {
  2575. case GGML_OP_REPEAT:
  2576. return ggml_vk_op_repeat;
  2577. default:
  2578. return nullptr;
  2579. }
  2580. }
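// Generic dispatcher for element-wise, unary, norm, rope and softmax ops: resolves the pipeline,
// uploads CPU-resident operands if needed, and issues either a single dispatch (contiguous case)
// or one per (i02, i03) slice.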
  2581. template<typename PC>
  2582. static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, ggml_op op, const PC&& pc) {
  2583. #ifdef GGML_VULKAN_DEBUG
  2584. std::cerr << "ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", backend=" << src0->backend << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
  2585. if (src1 != nullptr) {
  2586. std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", backend=" << src1->backend << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
  2587. }
  2588. std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", backend=" << dst->backend << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "), " << ggml_op_name(op) << ")" << std::endl;
  2589. #endif
  2590. GGML_ASSERT(!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type))); // NOLINT
  2591. GGML_ASSERT(op == GGML_OP_CPY || ggml_vk_dim01_contiguous(src0)); // NOLINT
  2592. GGML_ASSERT(src1 == nullptr || ggml_vk_dim01_contiguous(src1)); // NOLINT
  2593. GGML_ASSERT(dst->extra != nullptr);
  2594. const uint64_t ne00 = src0->ne[0];
  2595. const uint64_t ne01 = src0->ne[1];
  2596. const uint64_t ne02 = src0->ne[2];
  2597. const uint64_t ne03 = src0->ne[3];
  2598. const uint64_t ne0 = ne00 * ne01;
  2599. const bool use_src1 = src1 != nullptr;
  2600. const uint64_t ne10 = use_src1 ? src1->ne[0] : 0;
  2601. const uint64_t ne11 = use_src1 ? src1->ne[1] : 0;
  2602. const uint64_t ne12 = use_src1 ? src1->ne[2] : 0;
  2603. const uint64_t ne13 = use_src1 ? src1->ne[3] : 0;
  2604. const uint64_t ne1 = ne10 * ne11;
  2605. // const uint64_t nb10 = use_src1 ? src1->nb[0] : 0;
  2606. const uint64_t nb2 = dst->nb[2];
  2607. const uint64_t nb3 = dst->nb[3];
  2608. vk_pipeline * pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, dst, op);
  2609. ggml_vk_func_t op_func;
  2610. if (pipeline == nullptr) {
  2611. op_func = ggml_vk_op_get_func(op);
  2612. if (op_func == nullptr) {
  2613. std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(op) << " for " << ggml_type_name(src0->type);
  2614. if (src1 != nullptr) {
  2615. std::cerr << " and " << ggml_type_name(src1->type);
  2616. }
  2617. std::cerr << " to " << ggml_type_name(dst->type) << std::endl;
  2618. GGML_ASSERT(false);
  2619. }
  2620. op_func(ctx, subctx, src0, src1, dst);
  2621. return;
  2622. }
  2623. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
  2624. ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
  2625. ggml_tensor_extra_gpu * extra_src1 = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
  2626. vk_buffer d_X = nullptr;
  2627. size_t x_buf_offset = 0;
  2628. vk_buffer d_Y = nullptr;
  2629. size_t y_buf_offset = 0;
  2630. bool src0_uma = false;
  2631. bool src1_uma = false;
  2632. if (ctx->device.lock()->uma) {
  2633. ggml_vk_host_get(ctx, src0->data, d_X, x_buf_offset);
  2634. src0_uma = d_X != nullptr;
  2635. if (use_src1) {
  2636. ggml_vk_host_get(ctx, src1->data, d_Y, y_buf_offset);
  2637. src1_uma = d_Y != nullptr;
  2638. }
  2639. }
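// Sources that are neither already on the GPU nor reachable through UMA must be staged into the preallocated device buffers (prealloc_qx / prealloc_qy).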
  2640. const bool transfer_src0 = src0->backend != GGML_BACKEND_TYPE_GPU && !src0_uma;
  2641. const bool transfer_src1 = use_src1 && src1->backend != GGML_BACKEND_TYPE_GPU && !src1_uma;
  2642. uint64_t x_sz = ggml_vk_align_size(ggml_type_size(src0->type) * ne0, ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment);
  2643. uint64_t y_sz = use_src1 ? ggml_vk_align_size(ggml_type_size(src1->type) * ne1, ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) : 0;
  2644. uint64_t d_sz = ggml_type_size(dst->type) * ne0;
  2645. vk_buffer d_D = extra->buffer_gpu.lock();
  2646. // Workaround for tiny tensor inputs on ROPE
  2647. if (use_src1 && src1->backend == GGML_BACKEND_TYPE_GPU && y_sz > d_D->size) {
  2648. y_sz = VK_WHOLE_SIZE;
  2649. }
  2650. GGML_ASSERT(d_D != nullptr);
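// Align the destination offset down to the storage buffer alignment; only GGML_OP_CPY tolerates a remainder, which is forwarded to its shader as d_offset.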
  2651. uint64_t d_buf_offset = (extra->offset / ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment;
  2652. GGML_ASSERT(d_buf_offset == extra->offset || op == GGML_OP_CPY); // NOLINT
  2653. if (transfer_src0) {
  2654. d_X = ctx->prealloc_qx;
  2655. } else if(!src0_uma) {
  2656. d_X = extra_src0->buffer_gpu.lock();
  2657. x_buf_offset = extra_src0->offset;
  2658. GGML_ASSERT(d_X != nullptr);
  2659. }
  2660. if (transfer_src1) {
  2661. d_Y = ctx->prealloc_qy;
  2662. } else if (use_src1 && !src1_uma) {
  2663. d_Y = extra_src1->buffer_gpu.lock();
  2664. y_buf_offset = extra_src1->offset;
  2665. GGML_ASSERT(d_Y != nullptr);
  2666. }
  2667. if (op == GGML_OP_CPY) {
  2668. GGML_ASSERT(!transfer_src0);
  2669. GGML_ASSERT(!transfer_src1);
  2670. x_sz = ggml_nbytes(src0);
  2671. d_sz = ggml_nbytes(dst);
  2672. if (extra_src0->offset + x_sz >= d_X->size) {
  2673. x_sz = VK_WHOLE_SIZE;
  2674. }
  2675. if (extra->offset + d_sz >= d_D->size) {
  2676. d_sz = VK_WHOLE_SIZE;
  2677. }
  2678. }
  2679. std::array<uint32_t, 3> elements;
  2680. // copy src0 to device
  2681. if (transfer_src0) {
  2682. ggml_vk_h2d_tensor_2d(ctx, subctx, d_X, 0, src0, 0, 0, ggml_nrows(src0));
  2683. ctx->staging_offset = x_sz * ne02 * ne03;
  2684. }
  2685. if (transfer_src1) {
  2686. ggml_vk_h2d_tensor_2d(ctx, subctx, d_Y, 0, src1, 0, 0, ggml_nrows(src1));
  2687. }
  2688. // Single call if dimension 2 is contiguous
  2689. if (op == GGML_OP_CPY || (ggml_is_contiguous(src0) && (src1 == nullptr || ggml_is_contiguous(src1)))) {
  2690. ggml_pipeline_allocate_descriptor_sets(ctx, *pipeline, 1);
  2691. switch (dst->op) {
  2692. case GGML_OP_NORM:
  2693. case GGML_OP_RMS_NORM:
  2694. case GGML_OP_SOFT_MAX:
  2695. elements = { (uint32_t)ggml_nrows(src0), 1, 1 };
  2696. break;
  2697. case GGML_OP_DIAG_MASK_INF:
  2698. case GGML_OP_ROPE:
  2699. elements = { (uint32_t)ggml_nrows(src0), (uint32_t)ne00, 1 };
  2700. break;
  2701. default:
  2702. elements = { (uint32_t)ggml_nelements(src0), 1, 1 };
  2703. break;
  2704. }
  2705. if (op != GGML_OP_CPY) {
  2706. if (x_sz != VK_WHOLE_SIZE) {
  2707. x_sz *= ne02 * ne03;
  2708. }
  2709. if (y_sz != VK_WHOLE_SIZE) {
  2710. y_sz *= ne12 * ne13;
  2711. }
  2712. if (d_sz != VK_WHOLE_SIZE) {
  2713. d_sz *= ne02 * ne03;
  2714. }
  2715. }
  2716. if (!use_src1 && op == GGML_OP_SOFT_MAX) {
  2717. // Empty src1 is possible on soft_max, but the shader needs a buffer
  2718. ggml_vk_sync_buffers(subctx);
  2719. ggml_vk_dispatch_pipeline(ctx, subctx, *pipeline, { { d_X, x_buf_offset, x_sz }, { ctx->prealloc_y, 0, ctx->prealloc_y->size }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
  2720. } else if (use_src1) {
  2721. ggml_vk_sync_buffers(subctx);
  2722. ggml_vk_dispatch_pipeline(ctx, subctx, *pipeline, { { d_X, x_buf_offset, x_sz }, { d_Y, y_buf_offset, y_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
  2723. } else {
  2724. ggml_vk_sync_buffers(subctx);
  2725. ggml_vk_dispatch_pipeline(ctx, subctx, *pipeline, { { d_X, x_buf_offset, x_sz }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
  2726. }
  2727. if (dst->backend == GGML_BACKEND_TYPE_CPU && op == GGML_OP_CPY) {
  2728. ggml_vk_d2h_tensor_2d(ctx, subctx, d_D, 0, dst);
  2729. } else if(dst->backend == GGML_BACKEND_TYPE_CPU) {
  2730. // copy dst to host
  2731. float * d = (float *) dst->data;
  2732. ggml_vk_buffer_read_async(ctx, subctx, d_D, 0, d, d_sz);
  2733. }
  2734. } else {
  2735. ggml_pipeline_allocate_descriptor_sets(ctx, *pipeline, ne02 * ne03);
  2736. switch (dst->op) {
  2737. case GGML_OP_NORM:
  2738. case GGML_OP_RMS_NORM:
  2739. case GGML_OP_SOFT_MAX:
  2740. elements = { (uint32_t)ne01, 1, 1 };
  2741. break;
  2742. case GGML_OP_DIAG_MASK_INF:
  2743. case GGML_OP_ROPE:
  2744. elements = { (uint32_t)ne01, (uint32_t)ne00, 1 };
  2745. break;
  2746. default:
  2747. elements = { (uint32_t)ne0, 1, 1 };
  2748. break;
  2749. }
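// Otherwise dispatch once per (i02, i03) slice; src1 indices are taken modulo ne12/ne13 so a smaller src1 broadcasts across src0.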
  2750. for (uint64_t i03 = 0; i03 < ne03; i03++) {
  2751. for (uint64_t i02 = 0; i02 < ne02; i02++) {
  2752. const uint32_t it_idx0 = (i03 * ne02 + i02);
  2753. const uint32_t it_idx1 = use_src1 ? ((i03 % ne13) * ne12 + (i02 % ne12)) : 0;
  2754. const uint32_t x_offset = x_sz * it_idx0;
  2755. const uint32_t y_offset = y_sz * it_idx1;
  2756. const uint32_t d_offset = d_sz * it_idx0;
  2757. if (!use_src1 && op == GGML_OP_SOFT_MAX) {
  2758. // Empty src1 is possible on soft_max, but the shader needs a buffer
  2759. ggml_vk_sync_buffers(subctx);
  2760. ggml_vk_dispatch_pipeline(ctx, subctx, *pipeline, { { d_X, x_buf_offset, x_sz }, { ctx->prealloc_y, 0, ctx->prealloc_y->size }, { d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
  2761. } else if (use_src1) {
  2762. ggml_vk_sync_buffers(subctx);
  2763. ggml_vk_dispatch_pipeline(ctx, subctx, *pipeline, { { d_X, x_buf_offset + x_offset, x_sz }, { d_Y, y_buf_offset + y_offset, y_sz }, { d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
  2764. } else {
  2765. ggml_vk_sync_buffers(subctx);
  2766. ggml_vk_dispatch_pipeline(ctx, subctx, *pipeline, { { d_X, x_buf_offset + x_offset, x_sz }, { d_D, d_buf_offset + d_offset, d_sz } }, sizeof(PC), &pc, elements);
  2767. }
  2768. if (dst->backend == GGML_BACKEND_TYPE_CPU) {
  2769. // copy dst to host
  2770. ggml_vk_buffer_read_async(ctx, subctx, d_D, d_buf_offset + d_offset, (char *) dst->data + i02*nb2 + i03*nb3, d_sz);
  2771. }
  2772. }
  2773. }
  2774. }
  2775. }
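// Thin wrappers that map each ggml op onto ggml_vk_op_f32 with the matching push constant layout.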
  2776. static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2777. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, dst, GGML_OP_REPEAT, { (uint32_t)ggml_nelements(src0), (uint32_t)ggml_nelements(src1), 0.0f, 0.0f });
  2778. }
  2779. static void ggml_vk_get_rows(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2780. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, dst, GGML_OP_GET_ROWS, { (uint32_t)ggml_nelements(src0), (uint32_t)ggml_nelements(src1), 0.0f, 0.0f });
  2781. }
  2782. static void ggml_vk_add(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2783. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, dst, GGML_OP_ADD, { (uint32_t)ggml_nelements(src0), (uint32_t)ggml_nelements(src1), 0.0f, 0.0f });
  2784. }
  2785. static void ggml_vk_mul(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2786. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, dst, GGML_OP_MUL, { (uint32_t)ggml_nelements(src0), (uint32_t)ggml_nelements(src1), 0.0f, 0.0f });
  2787. }
  2788. static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2789. float * op_params = (float *)dst->op_params;
  2790. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, dst, GGML_OP_SCALE, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f });
  2791. }
  2792. static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2793. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, dst, GGML_OP_SQR, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
  2794. }
  2795. static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2796. float * op_params = (float *)dst->op_params;
  2797. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, dst, GGML_OP_CLAMP, { (uint32_t)ggml_nelements(src0), 0, op_params[0], op_params[1] });
  2798. }
  2799. static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2800. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) dst->extra;
  2801. const int src0_type_size = ggml_type_size(src0->type);
  2802. const int dst_type_size = ggml_type_size(dst->type);
  2803. const uint32_t d_offset = (extra->offset % ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;
  2804. ggml_vk_op_f32<vk_op_cpy_push_constants>(ctx, subctx, src0, nullptr, dst, GGML_OP_CPY, {
  2805. (uint32_t)ggml_nelements(src0),
  2806. (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size,
  2807. (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size,
  2808. d_offset,
  2809. });
  2810. }
  2811. static void ggml_vk_norm(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2812. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], 0.0f, 0.0f });
  2813. }
  2814. static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2815. float * op_params = (float *)dst->op_params;
  2816. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, dst, GGML_OP_RMS_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f });
  2817. }
  2818. static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2819. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f });
  2820. }
  2821. static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2822. int32_t * op_params = (int32_t *)dst->op_params;
  2823. ggml_vk_op_f32<vk_op_diag_mask_push_constants>(ctx, subctx, src0, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] });
  2824. }
  2825. static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2826. float * op_params = (float *)dst->op_params;
  2827. ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, dst, GGML_OP_SOFT_MAX, { (uint32_t)src0->ne[0], (uint32_t)(src1 != nullptr ? ggml_nrows(src1) : 0), op_params[0], 0.0f });
  2828. }
  2829. static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  2830. const int n_dims = ((int32_t *) dst->op_params)[1];
  2831. const int mode = ((int32_t *) dst->op_params)[2];
  2832. // const int n_ctx = ((int32_t *) dst->op_params)[3];
  2833. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  2834. const float freq_base = ((float *) dst->op_params)[5];
  2835. const float freq_scale = ((float *) dst->op_params)[6];
  2836. const float ext_factor = ((float *) dst->op_params)[7];
  2837. const float attn_factor = ((float *) dst->op_params)[8];
  2838. const float beta_fast = ((float *) dst->op_params)[9];
  2839. const float beta_slow = ((float *) dst->op_params)[10];
  2840. const bool is_neox = mode & 2;
  2841. const bool is_glm = mode & 4;
  2842. GGML_ASSERT(!is_glm);
  2843. float corr_dims[2];
  2844. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  2845. if (is_neox) {
  2846. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  2847. const float inv_ndims = -1.0f / n_dims;
  2848. ggml_vk_op_f32<vk_op_rope_neox_push_constants>(ctx, subctx, src0, src1, dst, GGML_OP_ROPE, { (uint32_t)src0->ne[0], (uint32_t)n_dims, freq_scale, (uint32_t)src0->ne[1], freq_base, ext_factor, attn_factor, corr_dims[0], corr_dims[1], 0.0f, 0.0f, theta_scale, inv_ndims });
  2849. } else {
  2850. ggml_vk_op_f32<vk_op_rope_push_constants>(ctx, subctx, src0, src1, dst, GGML_OP_ROPE, { (uint32_t)src0->ne[0], freq_scale, (uint32_t)src0->ne[1], freq_base, ext_factor, attn_factor, corr_dims[0], corr_dims[1], 0.0f, 0.0f });
  2851. }
  2852. }
  2853. static void ggml_vk_nop(ggml_backend_vk_context * ctx, vk_context * subctx, const ggml_tensor * src0, ggml_tensor * dst) {
  2854. // If backend is CPU, data from src0 has to be copied off the device
  2855. if (dst->backend == GGML_BACKEND_TYPE_CPU) {
  2856. ggml_tensor_extra_gpu * extra_src0 = (ggml_tensor_extra_gpu *) src0->extra;
  2857. vk_buffer d_D = extra_src0->buffer_gpu.lock();
  2858. ggml_vk_sync_buffers(subctx);
  2859. ggml_vk_buffer_read_async(ctx, subctx, d_D, 0, dst->data, d_D->size);
  2860. }
  2861. }
  2862. #ifdef GGML_VULKAN_RUN_TESTS
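// Debug helper: print a 10x10 window of values centered on (i0, i1) in slice i2 of an F32 or F16 matrix.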
  2863. static void ggml_vk_print_matrix_area(const void * data, ggml_type type, int ne0, int ne1, int i0, int i1, int i2) {
  2864. if (type != GGML_TYPE_F32 && type != GGML_TYPE_F16) {
  2865. return;
  2866. }
  2867. i0 = std::max(i0, 5);
  2868. i1 = std::max(i1, 5);
  2869. i2 = std::max(i2, 0);
  2870. fprintf(stderr, " ");
  2871. for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
  2872. fprintf(stderr, "%7d ", idx1);
  2873. }
  2874. fprintf(stderr, "\n");
  2875. for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
  2876. fprintf(stderr, "%7d: ", idx0);
  2877. for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
  2878. if (idx0 >= 0 && idx0 < ne0 && idx1 >= 0 && idx1 < ne1) {
  2879. float val;
  2880. if (type == GGML_TYPE_F32) {
  2881. val = *((const float *) data + i2*ne1*ne0 + idx1*ne0 + idx0);
  2882. } else if (type == GGML_TYPE_F16) {
  2883. val = ggml_fp16_to_fp32(*((const ggml_fp16_t *) data + i2*ne1*ne0 + idx1*ne0 + idx0));
  2884. }
  2885. fprintf(stderr, "% 7.2f ", val);
  2886. } else {
  2887. fprintf(stderr, " ");
  2888. }
  2889. }
  2890. fprintf(stderr, "\n");
  2891. }
  2892. }
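// Test harness: run batched m x n x k matmuls through the selected shader variant and compare the result against the CPU ggml computation.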
  2893. template <typename X_TYPE, typename Y_TYPE>
  2894. static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, int split_k, int shader_size) {
  2895. #ifdef GGML_VULKAN_DEBUG
  2896. std::cerr << "ggml_vk_test_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << shader_size << ")" << std::endl;
  2897. #endif
  2898. const size_t x_ne = m * k * batch;
  2899. const size_t y_ne = k * n * batch;
  2900. const size_t d_ne = m * n * batch;
  2901. vk_pipeline * p;
  2902. std::string shname;
  2903. if (shader_size == 0) {
  2904. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2905. p = &ctx->pipeline_matmul_f32_aligned_s;
  2906. shname = "F32_ALIGNED_S";
  2907. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2908. p = &ctx->pipeline_matmul_f16_f32_aligned_s;
  2909. shname = "F16_F32_ALIGNED_S";
  2910. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  2911. p = &ctx->pipeline_matmul_f16_aligned_s;
  2912. shname = "F16_ALIGNED_S";
  2913. } else {
  2914. GGML_ASSERT(false);
  2915. }
  2916. } else if (shader_size == 1) {
  2917. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2918. p = &ctx->pipeline_matmul_f32_aligned_m;
  2919. shname = "F32_ALIGNED_M";
  2920. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2921. p = &ctx->pipeline_matmul_f16_f32_aligned_m;
  2922. shname = "F16_F32_ALIGNED_M";
  2923. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  2924. p = &ctx->pipeline_matmul_f16_aligned_m;
  2925. shname = "F16_ALIGNED_M";
  2926. } else {
  2927. GGML_ASSERT(false);
  2928. }
  2929. } else if (shader_size == 2) {
  2930. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2931. p = &ctx->pipeline_matmul_f32_aligned_l;
  2932. shname = "F32_ALIGNED_L";
  2933. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2934. p = &ctx->pipeline_matmul_f16_f32_aligned_l;
  2935. shname = "F16_F32_ALIGNED_L";
  2936. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  2937. p = &ctx->pipeline_matmul_f16_aligned_l;
  2938. shname = "F16_ALIGNED_L";
  2939. } else {
  2940. GGML_ASSERT(false);
  2941. }
  2942. } else {
  2943. GGML_ASSERT(0);
  2944. }
  2945. const size_t kpad = ggml_vk_align_size(k, p->align);
  2946. if (k != kpad) {
  2947. if (shader_size == 0) {
  2948. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2949. p = &ctx->pipeline_matmul_f32_s;
  2950. shname = "F32_S";
  2951. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2952. p = &ctx->pipeline_matmul_f16_f32_s;
  2953. shname = "F16_F32_S";
  2954. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  2955. p = &ctx->pipeline_matmul_f16_s;
  2956. shname = "F16_S";
  2957. }
  2958. } else if (shader_size == 1) {
  2959. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2960. p = &ctx->pipeline_matmul_f32_m;
  2961. shname = "F32_M";
  2962. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2963. p = &ctx->pipeline_matmul_f16_f32_m;
  2964. shname = "F16_F32_M";
  2965. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  2966. p = &ctx->pipeline_matmul_f16_m;
  2967. shname = "F16_M";
  2968. }
  2969. } else if (shader_size == 2) {
  2970. if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2971. p = &ctx->pipeline_matmul_f32_l;
  2972. shname = "F32_L";
  2973. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
  2974. p = &ctx->pipeline_matmul_f16_f32_l;
  2975. shname = "F16_F32_L";
  2976. } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
  2977. p = &ctx->pipeline_matmul_f16_l;
  2978. shname = "F16_L";
  2979. }
  2980. }
  2981. }
  2982. ggml_pipeline_allocate_descriptor_sets(ctx, *p, num_it);
  2983. if (split_k > 1) {
  2984. ggml_pipeline_allocate_descriptor_sets(ctx, ctx->pipeline_matmul_split_k_reduce, num_it);
  2985. if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
  2986. // Resize buffer
  2987. if (ctx->prealloc_split_k != nullptr) {
  2988. ggml_vk_destroy_buffer(ctx->prealloc_split_k);
  2989. }
  2990. ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
  2991. }
  2992. }
  2993. vk_buffer d_X = ggml_vk_create_buffer_check(ctx, sizeof(X_TYPE) * x_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
  2994. vk_buffer d_Y = ggml_vk_create_buffer_check(ctx, sizeof(Y_TYPE) * y_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
  2995. vk_buffer d_D = ggml_vk_create_buffer_check(ctx, sizeof(float) * d_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
  2996. X_TYPE* x = (X_TYPE *) malloc(sizeof(X_TYPE) * x_ne);
  2997. Y_TYPE* y = (Y_TYPE *) malloc(sizeof(Y_TYPE) * y_ne);
  2998. float* d = (float *) malloc(sizeof(float) * d_ne);
  2999. for (size_t i = 0; i < x_ne; i++) {
  3000. if (std::is_same<float, X_TYPE>()) {
  3001. x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
  3002. } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
  3003. x[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
  3004. } else {
  3005. GGML_ASSERT(false);
  3006. }
  3007. }
  3008. for (size_t i = 0; i < y_ne; i++) {
  3009. if (std::is_same<float, Y_TYPE>()) {
  3010. y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
  3011. } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
  3012. y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
  3013. } else {
  3014. GGML_ASSERT(false);
  3015. }
  3016. }
  3017. ggml_vk_buffer_write(ctx, d_X, 0, x, sizeof(X_TYPE) * k * m * batch);
  3018. ggml_vk_buffer_write(ctx, d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch);
  3019. vk_context * subctx = ggml_vk_create_context(ctx, ctx->device.lock()->compute_queue);
  3020. for (size_t i = 0; i < num_it; i++) {
  3021. ggml_vk_ctx_begin(ctx, subctx);
  3022. ggml_vk_matmul(ctx, subctx, *p, ggml_vk_subbuffer(d_X), ggml_vk_subbuffer(d_Y), ggml_vk_subbuffer(d_D), ggml_vk_subbuffer(ctx->prealloc_split_k), m, n, k, k, k, m, split_k, batch, batch, batch, 1, 1, k*m, k*n, m*n);
  3023. ggml_vk_ctx_end(subctx);
  3024. }
  3025. auto begin = std::chrono::high_resolution_clock::now();
  3026. ggml_vk_submit(subctx, ctx->fence);
  3027. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_matmul waitForFences");
  3028. ctx->device.lock()->device.resetFences({ ctx->fence });
  3029. auto end = std::chrono::high_resolution_clock::now();
  3030. double time = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
  3031. // copy dst to host
  3032. ggml_vk_buffer_read(ctx, d_D, 0, d, sizeof(float) * d_ne);
  3033. float * d_chk = (float *) malloc(sizeof(float) * d_ne);
  3034. ggml_init_params iparams = {
  3035. /*.mem_size =*/ 1024*1024*1024,
  3036. /*.mem_buffer =*/ NULL,
  3037. /*.no_alloc =*/ true,
  3038. };
  3039. ggml_context * ggml_ctx = ggml_init(iparams);
  3040. ggml_type src0_type;
  3041. ggml_type src1_type;
  3042. if (std::is_same<float, X_TYPE>()) {
  3043. src0_type = GGML_TYPE_F32;
  3044. } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
  3045. src0_type = GGML_TYPE_F16;
  3046. } else {
  3047. GGML_ASSERT(false);
  3048. }
  3049. if (std::is_same<float, Y_TYPE>()) {
  3050. src1_type = GGML_TYPE_F32;
  3051. } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
  3052. src1_type = GGML_TYPE_F16;
  3053. } else {
  3054. GGML_ASSERT(false);
  3055. }
  3056. ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, src0_type, k, m, batch);
  3057. ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, src1_type, k, n, batch);
  3058. ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);
  3059. src0_ggml->data = x;
  3060. src1_ggml->data = y;
  3061. tensor_ggml->data = d_chk;
  3062. ctx->disable = true;
  3063. ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
  3064. ggml_build_forward_expand(cgraph, tensor_ggml);
  3065. ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);
  3066. ctx->disable = false;
  3067. ggml_free(ggml_ctx);
  3068. double avg_err = 0.0;
  3069. int first_err_n = -1;
  3070. int first_err_m = -1;
  3071. int first_err_b = -1;
  3072. for (size_t i = 0; i < m*n*batch; i++) {
  3073. double err = std::fabs(d[i] - d_chk[i]);
  3074. avg_err += err;
  3075. if (err > 0.05f && first_err_n == -1) {
  3076. first_err_b = i / (m * n);
  3077. first_err_n = (i % (m * n)) / m;
  3078. first_err_m = (i % (m * n)) % m;
  3079. }
  3080. }
  3081. avg_err /= m * n;
  3082. std::cerr << "TEST " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time / num_it << "ms avg_err=" << avg_err << std::endl;
  3083. if (avg_err > 0.1) {
  3084. std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
  3085. std::cerr << "Actual result: " << std::endl << std::endl;
  3086. ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  3087. std::cerr << "Expected result: " << std::endl << std::endl;
  3088. ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  3089. if (split_k > 1) {
  3090. float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
  3091. ggml_vk_buffer_read(ctx, ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);
  3092. std::cerr << "d_buf0: " << std::endl << std::endl;
  3093. ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  3094. std::cerr << "d_buf1: " << std::endl << std::endl;
  3095. ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  3096. std::cerr << "d_buf2: " << std::endl << std::endl;
  3097. ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  3098. std::cerr << "d_buf3: " << std::endl << std::endl;
  3099. ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
  3100. free(split_k_buf);
  3101. }
  3102. }
  3103. free(d_chk);
  3104. ggml_vk_queue_cleanup(ctx, ctx->device.lock()->transfer_queue);
  3105. ggml_vk_queue_cleanup(ctx, ctx->device.lock()->compute_queue);
  3106. ggml_vk_destroy_buffer(d_X);
  3107. ggml_vk_destroy_buffer(d_Y);
  3108. ggml_vk_destroy_buffer(d_D);
  3109. ggml_pipeline_cleanup(*p);
  3110. ggml_pipeline_cleanup(ctx->pipeline_matmul_split_k_reduce);
  3111. free(x);
  3112. free(y);
  3113. free(d);
  3114. }
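// Debug helper: print a 10x10 window of a tensor around (i0, i1) at position (i2, i3), honoring the tensor's strides.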
  3115. static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  3116. if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
  3117. return;
  3118. }
  3119. i0 = std::max(i0, 5);
  3120. i1 = std::max(i1, 5);
  3121. i2 = std::max(i2, 0);
  3122. i3 = std::max(i3, 0);
  3123. fprintf(stderr, " ");
  3124. for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
  3125. fprintf(stderr, "%7d ", idx1);
  3126. }
  3127. fprintf(stderr, "\n");
  3128. for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
  3129. fprintf(stderr, "%7d: ", idx0);
  3130. for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
  3131. if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
  3132. float val;
  3133. if (tensor->type == GGML_TYPE_F32) {
  3134. val = *(float *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
  3135. } else if (tensor->type == GGML_TYPE_F16) {
  3136. val = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
  3137. }
  3138. fprintf(stderr, "% 7.2f ", val);
  3139. } else {
  3140. fprintf(stderr, " ");
  3141. }
  3142. }
  3143. fprintf(stderr, "\n");
  3144. }
  3145. }
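// Test harness: upload a permuted (non-contiguous) tensor to the device and verify it arrives in contiguous layout.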
  3146. static void ggml_vk_test_h2d_nc(ggml_backend_vk_context * ctx, size_t ne0, size_t ne1, size_t ne2, size_t ne3) {
  3147. const size_t ne = ne0 * ne1 * ne2 * ne3;
  3148. ggml_init_params iparams = {
  3149. /*.mem_size =*/ 1024*1024*1024,
  3150. /*.mem_buffer =*/ NULL,
  3151. /*.no_alloc =*/ true,
  3152. };
  3153. ggml_context * ggml_ctx = ggml_init(iparams);
  3154. ggml_tensor * tensor = ggml_new_tensor_4d(ggml_ctx, GGML_TYPE_F32, ne0, ne2, ne1, ne3); // NOLINT
  3155. ggml_tensor * result_tensor = ggml_new_tensor_4d(ggml_ctx, GGML_TYPE_F32, ne0, ne1, ne2, ne3);
  3156. float * data = (float *) ggml_vk_host_malloc(ctx, ggml_nbytes(tensor));
  3157. tensor->data = data;
  3158. float * result_data = (float *) malloc(ggml_nbytes(tensor));
  3159. result_tensor->data = result_data;
  3160. // Permute
  3161. {
  3162. size_t tmp = tensor->nb[2];
  3163. tensor->nb[2] = tensor->nb[1];
  3164. tensor->nb[1] = tmp;
  3165. tensor->ne[2] = ne2;
  3166. tensor->ne[1] = ne1;
  3167. }
  3168. for (size_t i = 0; i < ne; i++) {
  3169. data[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
  3170. }
  3171. vk_context * subctx = ggml_vk_create_context(ctx, ctx->device.lock()->compute_queue);
  3172. ggml_vk_ctx_begin(ctx, subctx);
  3173. vk_buffer buffer = ggml_vk_create_buffer_check(ctx, ggml_nbytes(tensor), vk::MemoryPropertyFlagBits::eDeviceLocal);
  3174. ggml_vk_h2d_tensor_2d(ctx, subctx, buffer, 0, tensor, 0, 0, ggml_nrows(tensor));
  3175. ggml_vk_ctx_end(subctx);
  3176. ggml_vk_submit(subctx, ctx->fence);
  3177. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_h2d_nc waitForFences");
  3178. ctx->device.lock()->device.resetFences({ ctx->fence });
  3179. ggml_vk_buffer_read(ctx, buffer, 0, result_data, ggml_nbytes(tensor));
  3180. double avg_err = 0.0;
  3181. int first_err_i0 = -1;
  3182. int first_err_i1 = -1;
  3183. int first_err_i2 = -1;
  3184. int first_err_i3 = -1;
  3185. for (size_t i3 = 0; i3 < ne3; i3++) {
  3186. for (size_t i2 = 0; i2 < ne2; i2++) {
  3187. for (size_t i1 = 0; i1 < ne1; i1++) {
  3188. for (size_t i0 = 0; i0 < ne0; i0++) {
  3189. float correct = *(float *) ((char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
  3190. float result = *(float *) ((char *) result_data + i3*ne2*ne1*ne0*sizeof(float) + i2*ne1*ne0*sizeof(float) + i1*ne0*sizeof(float) + i0*sizeof(float));
  3191. double err = std::fabs(result - correct);
  3192. avg_err += err;
  3193. if (err > 0.05f && first_err_i0 == -1) {
  3194. first_err_i0 = i0;
  3195. first_err_i1 = i1;
  3196. first_err_i2 = i2;
  3197. first_err_i3 = i3;
  3198. }
  3199. }
  3200. }
  3201. }
  3202. }
  3203. avg_err /= ne;
  3204. std::cerr << "TEST nc copy ne0=" << ne0 << " ne1=" << ne1 << " ne2=" << ne2 << " ne3=" << ne3 << " avg_err=" << avg_err << std::endl;
  3205. if (avg_err > 0.1) {
  3206. std::cerr << "i0 = " << first_err_i0 << " i1 = " << first_err_i1 << " i2 = " << first_err_i2 << " i3 = " << first_err_i3 << std::endl;
  3207. std::cerr << "Actual result: " << std::endl << std::endl;
  3208. ggml_vk_print_tensor_area(result_tensor, first_err_i0, first_err_i1, first_err_i2, first_err_i3);
  3209. std::cerr << "Expected result: " << std::endl << std::endl;
  3210. ggml_vk_print_tensor_area(tensor, first_err_i0, first_err_i1, first_err_i2, first_err_i3);
  3211. }
  3212. ggml_free(ggml_ctx);
  3213. ggml_vk_destroy_buffer(buffer);
  3214. ggml_vk_host_free(ctx, data);
  3215. free(result_data);
  3216. }
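// Test harness: measure host-to-device and device-to-host transfer speed for ne floats, optionally via pinned memory, and verify the data round-trips unchanged.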
  3217. static void ggml_vk_test_transfer(ggml_backend_vk_context * ctx, size_t ne, bool pinned) {
  3218. #ifdef GGML_VULKAN_DEBUG
  3219. std::cerr << "ggml_vk_test_transfer(" << ne << ")" << std::endl;
  3220. #endif
  3221. // Check transfers are correct
  3222. vk_buffer buffer = ggml_vk_create_buffer_check(ctx, sizeof(float) * ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
  3223. float * x;
  3224. float * y;
  3225. if (pinned) {
  3226. x = (float *) ggml_vk_host_malloc(ctx, sizeof(float) * ne);
  3227. y = (float *) ggml_vk_host_malloc(ctx, sizeof(float) * ne);
  3228. } else {
  3229. x = (float *) malloc(sizeof(float) * ne);
  3230. y = (float *) malloc(sizeof(float) * ne);
  3231. }
  3232. for (size_t i = 0; i < ne; i++) {
  3233. x[i] = rand() / (float)RAND_MAX;
  3234. }
  3235. vk_context * subctx = ggml_vk_create_context(ctx, ctx->device.lock()->compute_queue);
  3236. ggml_vk_ctx_begin(ctx, subctx);
  3237. auto begin = std::chrono::high_resolution_clock::now();
  3238. ggml_vk_buffer_write_async(ctx, subctx, buffer, 0, x, sizeof(float) * ne);
  3239. for (auto& cpy : subctx->in_memcpys) {
  3240. memcpy(cpy.dst, cpy.src, cpy.n);
  3241. }
  3242. subctx->in_memcpys.clear();
  3243. ggml_vk_ctx_end(subctx);
  3244. ggml_vk_submit(subctx, ctx->fence);
  3245. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_transfer waitForFences");
  3246. ctx->device.lock()->device.resetFences({ ctx->fence });
  3247. auto end = std::chrono::high_resolution_clock::now();
  3248. double ms_to_gpu = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
  3249. ggml_vk_ctx_begin(ctx, subctx);
  3250. begin = std::chrono::high_resolution_clock::now();
  3251. ggml_vk_buffer_read_async(ctx, subctx, buffer, 0, y, sizeof(float) * ne);
  3252. ggml_vk_ctx_end(subctx);
  3253. ggml_vk_submit(subctx, ctx->fence);
  3254. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_transfer waitForFences");
  3255. ctx->device.lock()->device.resetFences({ ctx->fence });
  3256. for (auto& cpy : subctx->out_memcpys) {
  3257. memcpy(cpy.dst, cpy.src, cpy.n);
  3258. }
  3259. subctx->out_memcpys.clear();
  3260. end = std::chrono::high_resolution_clock::now();
  3261. double ms_from_gpu = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
  3262. double avg_err = 0.0;
  3263. for (size_t i = 0; i < ne; i++) {
  3264. avg_err += std::fabs(x[i] - y[i]);
  3265. }
  3266. double kb = ne * sizeof(float) / 1024.0;
  3267. std::cerr << "TEST TRANSFER " << kb << " KB to_gpu " << ms_to_gpu << "ms (" << kb / ms_to_gpu * 1000.0 / 1024.0 << " MB/s) from_gpu " << ms_from_gpu << "ms (" << kb / ms_from_gpu * 1000.0 / 1024.0 << " MB/s) avg_err=" << avg_err / ne << std::endl;
  3268. ggml_vk_destroy_buffer(buffer);
  3269. if (pinned) {
  3270. ggml_vk_host_free(ctx, x);
  3271. ggml_vk_host_free(ctx, y);
  3272. } else {
  3273. free(x);
  3274. free(y);
  3275. }
  3276. }
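// Test harness: quantize random data on the CPU, dequantize it to F16 with the Vulkan shader, and report timing and average error.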
  3277. static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
  3278. #ifdef GGML_VULKAN_DEBUG
  3279. std::cerr << "ggml_vk_test_dequant(" << ne << ")" << std::endl;
  3280. #endif
  3281. const size_t x_sz = sizeof(float) * ne;
  3282. const size_t x_sz_f16 = sizeof(ggml_fp16_t) * ne;
  3283. const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
  3284. float * x = (float *) malloc(x_sz);
  3285. void * qx = malloc(qx_sz);
  3286. vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
  3287. vk_buffer x_buf = ggml_vk_create_buffer_check(ctx, x_sz_f16, vk::MemoryPropertyFlagBits::eDeviceLocal);
  3288. ggml_fp16_t * x_chk = (ggml_fp16_t *) malloc(x_sz_f16);
  3289. for (size_t i = 0; i < ne; i++) {
  3290. x[i] = rand() / (float)RAND_MAX;
  3291. }
  3292. std::vector<int64_t> hist_cur(1 << 4, 0);
  3293. vk_pipeline& p = ctx->pipeline_dequant[quant];
  3294. switch(quant) {
  3295. case GGML_TYPE_Q4_0:
  3296. ggml_quantize_q4_0(x, qx, ne, ne, hist_cur.data());
  3297. break;
  3298. case GGML_TYPE_Q4_1:
  3299. ggml_quantize_q4_1(x, qx, ne, ne, hist_cur.data());
  3300. break;
  3301. case GGML_TYPE_Q5_0:
  3302. ggml_quantize_q5_0(x, qx, ne, ne, hist_cur.data());
  3303. break;
  3304. case GGML_TYPE_Q5_1:
3305. ggml_quantize_q5_1(x, qx, ne, ne, hist_cur.data());
  3306. break;
  3307. case GGML_TYPE_Q8_0:
  3308. ggml_quantize_q8_0(x, qx, ne, ne, hist_cur.data());
  3309. break;
  3310. case GGML_TYPE_Q2_K:
  3311. ggml_quantize_q2_K(x, qx, ne, ne, hist_cur.data());
  3312. break;
  3313. case GGML_TYPE_Q3_K:
  3314. ggml_quantize_q3_K(x, qx, ne, ne, hist_cur.data());
  3315. break;
  3316. case GGML_TYPE_Q4_K:
  3317. ggml_quantize_q4_K(x, qx, ne, ne, hist_cur.data());
  3318. break;
  3319. case GGML_TYPE_Q5_K:
  3320. ggml_quantize_q5_K(x, qx, ne, ne, hist_cur.data());
  3321. break;
  3322. case GGML_TYPE_Q6_K:
  3323. ggml_quantize_q6_K(x, qx, ne, ne, hist_cur.data());
  3324. break;
  3325. default:
  3326. GGML_ASSERT(false);
  3327. }
  3328. ggml_pipeline_allocate_descriptor_sets(ctx, p, 1);
  3329. ggml_vk_buffer_write(ctx, qx_buf, 0, qx, qx_sz);
  3330. vk_context * subctx = ggml_vk_create_context(ctx, ctx->device.lock()->compute_queue);
  3331. ggml_vk_ctx_begin(ctx, subctx);
  3332. const std::vector<int> pc = { 1, (int)ne, (int)ne, (int)ne };
  3333. ggml_vk_dispatch_pipeline(ctx, subctx, p, { { qx_buf, 0, qx_sz }, { x_buf, 0, x_sz_f16 } }, pc.size() * sizeof(int), pc.data(), { (uint32_t)ne, 1, 1});
  3334. ggml_vk_ctx_end(subctx);
  3335. auto begin = std::chrono::high_resolution_clock::now();
  3336. ggml_vk_submit(subctx, ctx->fence);
  3337. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
  3338. ctx->device.lock()->device.resetFences({ ctx->fence });
  3339. auto end = std::chrono::high_resolution_clock::now();
  3340. double ms_dequant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
  3341. ggml_vk_buffer_read(ctx, x_buf, 0, x_chk, x_sz_f16);
  3342. double avg_err = 0.0;
  3343. for (size_t i = 0; i < ne; i++) {
  3344. avg_err += std::fabs(x[i] - ggml_fp16_to_fp32(x_chk[i]));
  3345. }
  3346. std::cerr << "TEST DEQUANT " << ggml_type_name(quant) << " time=" << ms_dequant << "ms avg_err=" << avg_err / ne << std::endl;
  3347. ggml_vk_destroy_buffer(x_buf);
  3348. ggml_vk_destroy_buffer(qx_buf);
  3349. free(x);
  3350. free(qx);
  3351. free(x_chk);
  3352. }
  3353. #endif
  3354. static ggml_tensor_extra_gpu * ggml_vk_tensor_create_extra(ggml_tensor * tensor) {
  3355. #ifdef GGML_VULKAN_DEBUG
  3356. std::cerr << "ggml_vk_create_extra(" << tensor << " (" << tensor->name << ", " << ggml_op_name(tensor->op) << "))" << std::endl;
  3357. #endif
  3358. ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu;
  3359. extra->reset();
  3360. tensor->extra = extra;
  3361. return extra;
  3362. }
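// Walk the graph backwards to find the last node that still reads from the given tensor.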
  3363. static ggml_tensor * ggml_vk_find_last_use(const ggml_tensor * node, ggml_cgraph * graph) {
  3364. GGML_ASSERT(node != nullptr);
  3365. for (int i = graph->n_nodes - 1; i >= 0; i--) {
  3366. for (int j = 0; j < GGML_MAX_SRC; j++) {
  3367. if (graph->nodes[i]->src[j] == node) {
  3368. return graph->nodes[i];
  3369. }
  3370. }
  3371. }
  3372. return nullptr;
  3373. }
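// Graph pre-pass: create missing tensor extras and grow the preallocated buffer size requirements; currently only GGML_OP_MUL_MAT contributes to the qx/qy/x/y/split-k/staging sizes.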
  3374. static void ggml_vk_preallocate_buffers_graph(ggml_backend_vk_context * ctx, ggml_tensor * node){
  3375. #ifdef GGML_VULKAN_DEBUG
  3376. std::cerr << "ggml_vk_preallocate_buffers_graph(" << node << ")" << std::endl;
  3377. #endif
  3378. const bool any_on_device = node->backend == GGML_BACKEND_TYPE_GPU
  3379. || (node->src[0] != nullptr && (node->src[0]->backend == GGML_BACKEND_TYPE_GPU || node->src[0]->backend == GGML_BACKEND_TYPE_GPU_SPLIT))
  3380. || (node->src[1] != nullptr && (node->src[1]->backend == GGML_BACKEND_TYPE_GPU));
  3381. if (ctx->disable || (!any_on_device && node->op != GGML_OP_MUL_MAT)) {
  3382. return;
  3383. }
  3384. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) node->extra;
  3385. if (extra == nullptr) {
  3386. // Workaround for CPU backend BLAS matmul calls
  3387. extra = ggml_vk_tensor_create_extra(node);
  3388. }
  3389. ggml_tensor * src0 = node->src[0];
  3390. ggml_tensor * src1 = node->src[1];
  3391. const bool use_src0 = src0 != nullptr;
  3392. const int64_t ne00 = use_src0 ? src0->ne[0] : 0;
  3393. const int64_t ne01 = use_src0 ? src0->ne[1] : 0;
  3394. const int64_t ne02 = use_src0 ? src0->ne[2] : 0;
  3395. const int64_t ne03 = use_src0 ? src0->ne[3] : 0;
  3396. const bool use_src1 = src1 != nullptr && node->op != GGML_OP_CPY && node->op != GGML_OP_CONT && node->op != GGML_OP_DUP;
  3397. const int64_t ne10 = use_src1 ? src1->ne[0] : 0;
  3398. const int64_t ne11 = use_src1 ? src1->ne[1] : 0;
  3399. const int64_t ne12 = use_src1 ? src1->ne[2] : 0;
  3400. const int64_t ne13 = use_src1 ? src1->ne[3] : 0;
  3401. const int64_t ne20 = node->ne[0];
  3402. const int64_t ne21 = node->ne[1];
  3403. const int64_t ne22 = node->ne[2];
  3404. const int64_t ne23 = node->ne[3];
  3405. const bool f16_f32_kernel = use_src1 && src1->type == GGML_TYPE_F32;
  3406. int split_k;
  3407. if (node->op == GGML_OP_MUL_MAT) {
  3408. split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);
  3409. } else {
  3410. split_k = 1;
  3411. }
  3412. const uint32_t x_ne = ne00 * ne01;
  3413. const uint32_t y_ne = ne10 * ne11;
  3414. const uint32_t d_ne = ne20 * ne21;
  3415. const uint64_t qx_sz = use_src0 ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ne02 * ne03 : 0;
  3416. const uint64_t qy_sz = use_src1 ? ggml_vk_align_size(ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type), ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ne12 * ne13 : 0;
  3417. const uint64_t x_sz = use_src0 ? ggml_vk_align_size(sizeof(ggml_fp16_t) * x_ne, ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ne02 * ne03 : 0;
  3418. const uint64_t y_sz = use_src1 ? ggml_vk_align_size(f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne, ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ne12 * ne13 : 0;
  3419. uint64_t d_sz = ggml_vk_align_size(ggml_type_size(node->type) * d_ne, ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment) * ne22 * ne23;
  3420. const uint64_t split_k_size = split_k > 1 ? d_sz * 4 : 0;
  3421. if (extra->buffer_gpu.expired()) {
  3422. // Workaround for CPU backend BLAS matmul calls
  3423. extra->buffer_gpu = ggml_vk_create_buffer_temp(ctx, d_sz);
  3424. }
  3425. switch (node->op) {
  3426. case GGML_OP_REPEAT:
  3427. case GGML_OP_GET_ROWS:
  3428. case GGML_OP_RESHAPE:
  3429. case GGML_OP_VIEW:
  3430. case GGML_OP_PERMUTE:
  3431. case GGML_OP_TRANSPOSE:
  3432. case GGML_OP_ADD:
  3433. case GGML_OP_SCALE:
  3434. case GGML_OP_SQR:
  3435. case GGML_OP_CLAMP:
  3436. case GGML_OP_CPY:
  3437. case GGML_OP_CONT:
  3438. case GGML_OP_DUP:
  3439. case GGML_OP_MUL:
  3440. case GGML_OP_NORM:
  3441. case GGML_OP_RMS_NORM:
  3442. case GGML_OP_DIAG_MASK_INF:
  3443. case GGML_OP_SOFT_MAX:
  3444. case GGML_OP_ROPE:
  3445. break;
  3446. case GGML_OP_UNARY:
  3447. switch (ggml_get_unary_op(node)) {
  3448. case GGML_UNARY_OP_SILU:
  3449. case GGML_UNARY_OP_GELU:
  3450. case GGML_UNARY_OP_RELU:
  3451. break;
  3452. default:
  3453. return;
  3454. }
  3455. break;
  3456. case GGML_OP_MUL_MAT:
  3457. if (ctx->prealloc_size_qx < qx_sz) {
  3458. ctx->prealloc_size_qx = qx_sz;
  3459. }
  3460. if (ctx->prealloc_size_qy < qy_sz) {
  3461. ctx->prealloc_size_qy = qy_sz;
  3462. }
  3463. if (ctx->prealloc_size_x < x_sz) {
  3464. ctx->prealloc_size_x = x_sz;
  3465. }
  3466. if (ctx->prealloc_size_y < y_sz) {
  3467. ctx->prealloc_size_y = y_sz;
  3468. }
  3469. if (ctx->prealloc_size_split_k < split_k_size) {
  3470. ctx->prealloc_size_split_k = split_k_size;
  3471. }
  3472. if (ctx->staging_size < x_sz + y_sz) {
  3473. ctx->staging_size = x_sz + y_sz;
  3474. }
  3475. break;
  3476. default:
  3477. return;
  3478. }
  3479. }
  3480. static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
  3481. if (ctx->disable) {
  3482. return;
  3483. }
  3484. #ifdef GGML_VULKAN_DEBUG
  3485. std::cerr << "ggml_vk_preallocate_buffers(qx_size: " << ctx->prealloc_size_qx << " qy_size: " << ctx->prealloc_size_qy << " x_size: " << ctx->prealloc_size_x << " y_size: " << ctx->prealloc_size_y << " split_k_size: " << ctx->prealloc_size_split_k << ")" << std::endl;
  3486. #endif
  3487. #if defined(GGML_VULKAN_RUN_TESTS)
  3488. ctx->staging = ggml_vk_create_buffer_check(ctx, 100ul * 1024ul * 1024ul,
3489. vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
  3490. vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
  3491. ggml_vk_test_transfer(ctx, 8192 * 1000, false);
  3492. ggml_vk_test_transfer(ctx, 8192 * 1000, true);
  3493. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q4_0);
  3494. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q4_1);
  3495. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q5_0);
  3496. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q5_1);
  3497. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q8_0);
  3498. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q2_K);
  3499. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q3_K);
  3500. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q4_K);
  3501. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q5_K);
  3502. ggml_vk_test_dequant(ctx, 2560 * 7680, GGML_TYPE_Q6_K);
  3503. const std::vector<size_t> vals {
  3504. 8, 8, 8,
  3505. 100, 46, 576,
  3506. 623, 111, 128,
  3507. 100, 46, 558,
  3508. 512, 1, 256,
  3509. 128, 110, 622,
  3510. 511, 511, 127,
  3511. 511, 511, 7,
  3512. 511, 511, 17,
  3513. 49, 49, 128,
  3514. 128, 49, 49,
  3515. 4096, 49, 4096,
  3516. 11008, 49, 4096,
  3517. 4096, 49, 11008,
  3518. 32000, 49, 4096,
  3519. 512, 512, 128,
  3520. 128, 512, 512,
  3521. 4096, 512, 4096,
  3522. 11008, 512, 4096,
  3523. 4096, 512, 11008,
  3524. 32000, 512, 4096,
  3525. };
  3526. const size_t num_it = 1;
  3527. for (size_t i = 0; i < vals.size(); i += 3) {
  3528. ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0);
  3529. ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1);
  3530. ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2);
  3531. ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
  3532. ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
  3533. ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
  3534. std::cerr << std::endl;
  3535. }
  3536. GGML_ASSERT(false);
  3537. #endif
  3538. if (ctx->prealloc_qx == nullptr || (ctx->prealloc_size_qx > 0 && ctx->prealloc_qx->size < ctx->prealloc_size_qx)) {
  3539. // Resize buffer
  3540. if (ctx->prealloc_qx != nullptr) {
  3541. ggml_vk_destroy_buffer(ctx->prealloc_qx);
  3542. }
  3543. ctx->prealloc_qx = ggml_vk_create_buffer_device(ctx, ctx->prealloc_size_qx);
  3544. }
  3545. if (ctx->prealloc_qy == nullptr || (ctx->prealloc_size_qy > 0 && ctx->prealloc_qy->size < ctx->prealloc_size_qy)) {
  3546. // Resize buffer
  3547. if (ctx->prealloc_qy != nullptr) {
  3548. ggml_vk_destroy_buffer(ctx->prealloc_qy);
  3549. }
  3550. ctx->prealloc_qy = ggml_vk_create_buffer_device(ctx, ctx->prealloc_size_qy);
  3551. }
  3552. if (ctx->prealloc_x == nullptr || (ctx->prealloc_size_x > 0 && ctx->prealloc_x->size < ctx->prealloc_size_x)) {
  3553. // Resize buffer
  3554. if (ctx->prealloc_x != nullptr) {
  3555. ggml_vk_destroy_buffer(ctx->prealloc_x);
  3556. }
  3557. ctx->prealloc_x = ggml_vk_create_buffer_device(ctx, ctx->prealloc_size_x);
  3558. }
  3559. if (ctx->prealloc_y == nullptr || (ctx->prealloc_size_y > 0 && ctx->prealloc_y->size < ctx->prealloc_size_y)) {
  3560. // Resize buffer
  3561. if (ctx->prealloc_y != nullptr) {
  3562. ggml_vk_destroy_buffer(ctx->prealloc_y);
  3563. }
  3564. ctx->prealloc_y = ggml_vk_create_buffer_device(ctx, ctx->prealloc_size_y);
  3565. }
  3566. if (ctx->prealloc_split_k == nullptr || (ctx->prealloc_size_split_k > 0 && ctx->prealloc_split_k->size < ctx->prealloc_size_split_k)) {
  3567. // Resize buffer
  3568. if (ctx->prealloc_split_k != nullptr) {
  3569. ggml_vk_destroy_buffer(ctx->prealloc_split_k);
  3570. }
  3571. ctx->prealloc_split_k = ggml_vk_create_buffer_device(ctx, ctx->prealloc_size_split_k);
  3572. }
  3573. if (ctx->staging == nullptr || (ctx->staging_size > 0 && ctx->staging->size < ctx->staging_size)) {
  3574. // Resize buffer
  3575. if (ctx->staging != nullptr) {
  3576. ggml_vk_destroy_buffer(ctx->staging);
  3577. }
  3578. ctx->staging = ggml_vk_create_buffer_check(ctx, ctx->staging_size,
  3579. vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
  3580. vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
  3581. }
  3582. }
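// Record the Vulkan commands for one graph node into the current compute context; the context is closed on the last node (or a CPU-backed node) and submitted later from ggml_vk_compute_forward.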
  3583. static void ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * node, bool last_node){
  3584. const bool any_on_device = node->backend == GGML_BACKEND_TYPE_GPU
  3585. || (node->src[0] != nullptr && (node->src[0]->backend == GGML_BACKEND_TYPE_GPU || node->src[0]->backend == GGML_BACKEND_TYPE_GPU_SPLIT))
  3586. || (node->src[1] != nullptr && node->src[1]->backend == GGML_BACKEND_TYPE_GPU);
  3587. if (ctx->disable || (!any_on_device && node->op != GGML_OP_MUL_MAT) || (node->op == GGML_OP_MUL_MAT && !any_on_device && !ggml_vk_can_mul_mat(node->src[0], node->src[1], node))) {
  3588. return;
  3589. }
  3590. #ifdef GGML_VULKAN_DEBUG
  3591. std::cerr << "ggml_vk_build_graph(" << node << ", " << ggml_op_name(node->op) << ")" << std::endl;
  3592. #endif
  3593. ctx->semaphore_idx = 0;
  3594. ctx->staging_offset = 0;
  3595. const ggml_tensor * src0 = node->src[0];
  3596. const ggml_tensor * src1 = node->src[1];
  3597. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) node->extra;
  3598. switch (node->op) {
  3599. case GGML_OP_UNARY:
  3600. switch (ggml_get_unary_op(node)) {
  3601. case GGML_UNARY_OP_SILU:
  3602. case GGML_UNARY_OP_GELU:
  3603. case GGML_UNARY_OP_RELU:
  3604. break;
  3605. default:
  3606. return;
  3607. }
  3608. break;
  3609. case GGML_OP_REPEAT:
  3610. // case GGML_OP_GET_ROWS:
  3611. case GGML_OP_ADD:
  3612. case GGML_OP_MUL:
  3613. case GGML_OP_SCALE:
  3614. case GGML_OP_SQR:
  3615. case GGML_OP_CLAMP:
  3616. case GGML_OP_CPY:
  3617. case GGML_OP_CONT:
  3618. case GGML_OP_DUP:
  3619. case GGML_OP_RESHAPE:
  3620. case GGML_OP_VIEW:
  3621. case GGML_OP_PERMUTE:
  3622. case GGML_OP_TRANSPOSE:
  3623. case GGML_OP_NORM:
  3624. case GGML_OP_RMS_NORM:
  3625. case GGML_OP_DIAG_MASK_INF:
  3626. case GGML_OP_SOFT_MAX:
  3627. case GGML_OP_ROPE:
  3628. case GGML_OP_MUL_MAT:
  3629. case GGML_OP_NONE:
  3630. break;
  3631. default:
  3632. if (any_on_device) {
  3633. std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(node->op) << std::endl;
  3634. GGML_ASSERT(false);
  3635. }
  3636. return;
  3637. }
  3638. if (ctx->compute_ctx == nullptr) {
  3639. ctx->compute_ctx = ggml_vk_create_context(ctx, ctx->device.lock()->compute_queue);
  3640. ggml_vk_ctx_begin(ctx, ctx->compute_ctx);
  3641. }
  3642. switch (node->op) {
  3643. case GGML_OP_REPEAT:
  3644. ggml_vk_repeat(ctx, ctx->compute_ctx, src0, src1, node);
  3645. break;
  3646. case GGML_OP_GET_ROWS:
  3647. ggml_vk_get_rows(ctx, ctx->compute_ctx, src0, src1, node);
  3648. break;
  3649. case GGML_OP_ADD:
  3650. ggml_vk_add(ctx, ctx->compute_ctx, src0, src1, node);
  3651. break;
  3652. case GGML_OP_MUL:
  3653. ggml_vk_mul(ctx, ctx->compute_ctx, src0, src1, node);
  3654. break;
  3655. case GGML_OP_SCALE:
  3656. ggml_vk_scale(ctx, ctx->compute_ctx, src0, node);
  3657. break;
  3658. case GGML_OP_SQR:
  3659. ggml_vk_sqr(ctx, ctx->compute_ctx, src0, node);
  3660. break;
  3661. case GGML_OP_CLAMP:
  3662. ggml_vk_clamp(ctx, ctx->compute_ctx, src0, node);
  3663. break;
  3664. case GGML_OP_CPY:
  3665. case GGML_OP_CONT:
  3666. case GGML_OP_DUP:
  3667. ggml_vk_cpy(ctx, ctx->compute_ctx, src0, node);
  3668. break;
  3669. case GGML_OP_RESHAPE:
  3670. case GGML_OP_VIEW:
  3671. case GGML_OP_PERMUTE:
  3672. case GGML_OP_TRANSPOSE:
  3673. case GGML_OP_NONE:
  3674. ggml_vk_nop(ctx, ctx->compute_ctx, src0, node);
  3675. break;
  3676. case GGML_OP_NORM:
  3677. ggml_vk_norm(ctx, ctx->compute_ctx, src0, node);
  3678. break;
  3679. case GGML_OP_RMS_NORM:
  3680. ggml_vk_rms_norm(ctx, ctx->compute_ctx, src0, node);
  3681. break;
  3682. case GGML_OP_UNARY:
  3683. switch (ggml_get_unary_op(node)) {
  3684. case GGML_UNARY_OP_SILU:
  3685. case GGML_UNARY_OP_GELU:
  3686. case GGML_UNARY_OP_RELU:
  3687. ggml_vk_unary(ctx, ctx->compute_ctx, src0, node);
  3688. break;
  3689. default:
  3690. return;
  3691. }
  3692. break;
  3693. case GGML_OP_DIAG_MASK_INF:
  3694. ggml_vk_diag_mask_inf(ctx, ctx->compute_ctx, src0, node);
  3695. break;
  3696. case GGML_OP_SOFT_MAX:
  3697. ggml_vk_soft_max(ctx, ctx->compute_ctx, src0, src1, node);
  3698. break;
  3699. case GGML_OP_ROPE:
  3700. ggml_vk_rope(ctx, ctx->compute_ctx, src0, src1, node);
  3701. break;
  3702. case GGML_OP_MUL_MAT:
  3703. ggml_vk_mul_mat(ctx, ctx->compute_ctx, src0, src1, node);
  3704. break;
  3705. default:
  3706. return;
  3707. }
  3708. extra->ready = true;
  3709. extra->ctx_idx = ctx->compute_ctx->idx;
  3710. #ifdef GGML_VULKAN_CHECK_RESULTS
  3711. // Force context reset on each node so that each tensor ends up in its own context
  3712. // and can be run and compared to its CPU equivalent separately
  3713. last_node = true;
  3714. #endif
  3715. if (node->backend == GGML_BACKEND_TYPE_CPU || last_node) {
  3716. ggml_vk_ctx_end(ctx->compute_ctx);
  3717. ctx->compute_ctx->exit_tensor = node;
  3718. ctx->compute_ctx = nullptr;
  3719. }
  3720. }
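// Called during graph compute: submit the node's recorded context if it has not been submitted yet; on the context's exit tensor, wait on the fence and perform the remaining staging copies.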
  3721. static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor){
  3722. const bool any_on_device = tensor->backend == GGML_BACKEND_TYPE_GPU
  3723. || (tensor->src[0] != nullptr && (tensor->src[0]->backend == GGML_BACKEND_TYPE_GPU || tensor->src[0]->backend == GGML_BACKEND_TYPE_GPU_SPLIT))
  3724. || (tensor->src[1] != nullptr && tensor->src[1]->backend == GGML_BACKEND_TYPE_GPU);
  3725. if (ctx->disable || (!any_on_device && tensor->op != GGML_OP_MUL_MAT)) {
  3726. return false;
  3727. }
  3728. ggml_tensor_extra_gpu * extra = nullptr;
  3729. switch (tensor->op) {
  3730. case GGML_OP_ADD:
  3731. case GGML_OP_GET_ROWS:
  3732. case GGML_OP_MUL:
  3733. case GGML_OP_SCALE:
  3734. case GGML_OP_SQR:
  3735. case GGML_OP_CLAMP:
  3736. case GGML_OP_CPY:
  3737. case GGML_OP_CONT:
  3738. case GGML_OP_DUP:
  3739. case GGML_OP_NORM:
  3740. case GGML_OP_RMS_NORM:
  3741. case GGML_OP_DIAG_MASK_INF:
  3742. case GGML_OP_SOFT_MAX:
  3743. case GGML_OP_ROPE:
  3744. case GGML_OP_RESHAPE:
  3745. case GGML_OP_VIEW:
  3746. case GGML_OP_PERMUTE:
  3747. case GGML_OP_TRANSPOSE:
  3748. case GGML_OP_NONE:
  3749. extra = (ggml_tensor_extra_gpu *) tensor->extra;
  3750. break;
  3751. case GGML_OP_UNARY:
  3752. switch (ggml_get_unary_op(tensor)) {
  3753. case GGML_UNARY_OP_SILU:
  3754. case GGML_UNARY_OP_GELU:
  3755. case GGML_UNARY_OP_RELU:
  3756. extra = (ggml_tensor_extra_gpu *) tensor->extra;
  3757. break;
  3758. default:
  3759. return false;
  3760. }
  3761. break;
  3762. case GGML_OP_MUL_MAT:
  3763. if (!any_on_device && !ggml_vk_can_mul_mat(tensor->src[0], tensor->src[1], tensor)) {
  3764. return false;
  3765. }
  3766. extra = (ggml_tensor_extra_gpu *) tensor->extra;
  3767. break;
  3768. default:
  3769. return false;
  3770. }
  3771. if (extra == nullptr) {
  3772. return false;
  3773. }
  3774. if (params->ith != 0) {
  3775. return true;
  3776. }
  3777. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  3778. return true;
  3779. }
  3780. #ifdef GGML_VULKAN_DEBUG
  3781. std::cerr << "ggml_vk_compute_forward(" << tensor << ", name=" << tensor->name << ", op=" << ggml_op_name(tensor->op) << ", type=" << tensor->type << ", backend=" << tensor->backend << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << ", view_src=" << tensor->view_src << ", view_offs=" << tensor->view_offs << ")" << std::endl;
  3782. #endif
  3783. #ifdef GGML_VULKAN_CHECK_RESULTS
  3784. ggml_vk_check_results_0(ctx, params, tensor);
  3785. #endif
  3786. GGML_ASSERT(extra->ready);
  3787. vk_context& subctx = ctx->gc.contexts[extra->ctx_idx];
  3788. // Only run if ctx hasn't been submitted yet
  3789. if (!subctx.seqs.empty()) {
  3790. // Do staging buffer copies
  3791. for (auto& cpy : subctx.in_memcpys) {
  3792. memcpy(cpy.dst, cpy.src, cpy.n);
  3793. }
  3794. ggml_vk_submit(&subctx, ctx->fence);
  3795. }
  3796. if (tensor == subctx.exit_tensor) {
  3797. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_compute_forward waitForFences");
  3798. ctx->device.lock()->device.resetFences({ ctx->fence });
  3799. // Do staging buffer copies
  3800. for (auto& cpy : subctx.out_memcpys) {
  3801. memcpy(cpy.dst, cpy.src, cpy.n);
  3802. }
  3803. subctx.in_memcpys.clear();
  3804. subctx.out_memcpys.clear();
  3805. }
  3806. extra->ready = false;
  3807. return true;
  3808. }
  3809. // Clean up after graph processing is done
  3810. static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) {
  3811. if (ctx->disable) {
  3812. return;
  3813. }
  3814. #ifdef GGML_VULKAN_DEBUG
  3815. std::cerr << "ggml_vk_graph_cleanup()" << std::endl;
  3816. #endif
  3817. for (auto& buffer : ctx->gc.temp_buffers) {
  3818. ggml_vk_pool_free(ctx, buffer);
  3819. }
  3820. ctx->gc.temp_buffers.clear();
  3821. for (auto * pipeline : ctx->gc.pipelines) {
  3822. ggml_pipeline_cleanup(*pipeline);
  3823. }
  3824. ggml_vk_queue_cleanup(ctx, ctx->device.lock()->compute_queue);
  3825. ggml_vk_queue_cleanup(ctx, ctx->device.lock()->transfer_queue);
  3826. for (size_t i = 0; i < ctx->gc.semaphores.size(); i++) {
  3827. ctx->device.lock()->device.destroySemaphore({ ctx->gc.semaphores[i].s });
  3828. }
  3829. ctx->gc.semaphores.clear();
  3830. for (size_t i = 0; i < ctx->gc.tl_semaphores.size(); i++) {
  3831. ctx->device.lock()->device.destroySemaphore({ ctx->gc.tl_semaphores[i].s });
  3832. }
  3833. ctx->gc.tl_semaphores.clear();
  3834. ctx->semaphore_idx = 0;
  3835. ctx->event_idx = 0;
  3836. for (auto& event : ctx->gc.events) {
  3837. ctx->device.lock()->device.resetEvent(event);
  3838. }
  3839. ctx->staging_offset = 0;
  3840. ctx->compute_ctx = nullptr;
  3841. ctx->transfer_ctx = nullptr;
  3842. ctx->gc.contexts.clear();
  3843. }
  3844. // Clean up on backend free
  3845. static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) {
  3846. #ifdef GGML_VULKAN_DEBUG
  3847. std::cerr << "ggml_vk_cleanup(" << ctx->idx << ")" << std::endl;
  3848. #endif
  3849. ggml_vk_graph_cleanup(ctx);
  3850. ggml_vk_destroy_buffer(ctx->prealloc_qx);
  3851. ggml_vk_destroy_buffer(ctx->prealloc_qy);
  3852. ggml_vk_destroy_buffer(ctx->prealloc_x);
  3853. ggml_vk_destroy_buffer(ctx->prealloc_y);
  3854. ggml_vk_destroy_buffer(ctx->prealloc_split_k);
  3855. ggml_vk_destroy_buffer(ctx->staging);
  3856. ggml_vk_destroy_buffer(ctx->sync_staging);
  3857. for (auto& buffer : ctx->buffer_pool) {
  3858. ggml_vk_destroy_buffer(buffer);
  3859. }
  3860. ctx->prealloc_size_qx = 0;
  3861. ctx->prealloc_size_qy = 0;
  3862. ctx->prealloc_size_x = 0;
  3863. ctx->prealloc_size_y = 0;
  3864. ctx->prealloc_size_split_k = 0;
  3865. ctx->staging_size = 0;
  3866. for (auto& event : ctx->gc.events) {
  3867. ctx->device.lock()->device.destroyEvent(event);
  3868. }
  3869. ctx->gc.events.clear();
  3870. for (auto* pipeline : ctx->gc.pipelines) {
  3871. ggml_vk_destroy_pipeline(ctx, pipeline);
  3872. }
  3873. ctx->gc.pipelines.clear();
  3874. ctx->device.lock()->device.destroyFence(ctx->fence);
  3875. ctx->device.lock()->device.destroyCommandPool(ctx->device.lock()->compute_queue.pool);
  3876. if (!ctx->device.lock()->single_queue) {
  3877. ctx->device.lock()->device.destroyCommandPool(ctx->device.lock()->transfer_queue.pool);
  3878. }
  3879. }
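// Device enumeration helpers, used by both the CPU-assist path and the public backend API.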
  3880. GGML_CALL static int ggml_vk_get_device_count() {
  3881. ggml_vk_instance_init();
  3882. return vk_instance.device_indices.size();
  3883. }
  3884. GGML_CALL static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
  3885. ggml_vk_instance_init();
  3886. std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();
  3887. vk::PhysicalDeviceProperties props;
  3888. devices[device].getProperties(&props);
  3889. snprintf(description, description_size, "%s", props.deviceName.data());
  3890. }
  3891. // CPU assist interface
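// These entry points let the CPU backend offload work to Vulkan. They all operate on
// vk_instance.contexts[0] and are no-ops until ggml_vk_init_cpu_assist() has initialized
// that context.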
  3892. void ggml_vk_init_cpu_assist() {
  3893. ggml_vk_instance_init();
  3894. std::cerr << "ggml_vulkan: Found " << ggml_vk_get_device_count() << " Vulkan devices:" << std::endl;
  3895. for (int i = 0; i < ggml_vk_get_device_count(); i++) {
  3896. ggml_vk_print_gpu_info(i);
  3897. }
  3898. // Initialize the first backend to make sure CPU matrix multiplications can be offloaded.
  3899. ggml_backend_vk_init(0);
  3900. }
  3901. void ggml_vk_preallocate_buffers_graph_cpu_assist(ggml_tensor * node) {
  3902. ggml_backend_vk_context * ctx = &vk_instance.contexts[0];
  3903. if (!ctx->initialized) {
  3904. return;
  3905. }
  3906. ggml_vk_preallocate_buffers_graph(ctx, node);
  3907. }
  3908. void ggml_vk_preallocate_buffers_cpu_assist() {
  3909. ggml_backend_vk_context * ctx = &vk_instance.contexts[0];
  3910. if (!ctx->initialized) {
  3911. return;
  3912. }
  3913. ggml_vk_preallocate_buffers(ctx);
  3914. }
  3915. void ggml_vk_build_graph_cpu_assist(ggml_tensor * node, bool last_node) {
  3916. ggml_backend_vk_context * ctx = &vk_instance.contexts[0];
  3917. if (!ctx->initialized) {
  3918. return;
  3919. }
  3920. ggml_vk_build_graph(ctx, node, last_node);
  3921. }
  3922. bool ggml_vk_compute_forward_cpu_assist(ggml_compute_params * params, ggml_tensor * tensor){
  3923. ggml_backend_vk_context * ctx = &vk_instance.contexts[0];
  3924. if (!ctx->initialized) {
  3925. return false;
  3926. }
  3927. return ggml_vk_compute_forward(ctx, params, tensor);
  3928. }
  3929. void ggml_vk_graph_cleanup_cpu_assist() {
  3930. ggml_backend_vk_context * ctx = &vk_instance.contexts[0];
  3931. if (!ctx->initialized) {
  3932. return;
  3933. }
  3934. ggml_vk_graph_cleanup(ctx);
  3935. }
  3936. void ggml_vk_free_cpu_assist() {
  3937. ggml_backend_vk_context * ctx = &vk_instance.contexts[0];
  3938. if (!ctx->initialized || vk_instance.backends[0] == nullptr) {
  3939. return;
  3940. }
  3941. ggml_backend_vk_free(vk_instance.backends[0]);
  3942. }
  3943. // backend interface
  3944. #define UNUSED GGML_UNUSED
  3945. // device backend
  3946. static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT
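// Per-buffer context: owns the device-local vk_buffer and a small ring of
// ggml_tensor_extra_gpu structs handed out to tensors placed in this buffer.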
  3947. struct ggml_backend_vk_buffer_context {
  3948. ggml_backend_vk_context * ctx;
  3949. vk_buffer dev_buffer;
  3950. ggml_tensor_extra_gpu * temp_tensor_extras = nullptr;
  3951. size_t temp_tensor_extra_index = 0;
  3952. std::string name;
  3953. ggml_backend_vk_buffer_context(ggml_backend_vk_context * ctx, vk_buffer&& dev_buffer, std::string& name) :
  3954. ctx(ctx),
  3955. dev_buffer(dev_buffer),
  3956. name(name) {
  3957. }
  3958. ~ggml_backend_vk_buffer_context() {
  3959. ggml_vk_destroy_buffer(dev_buffer);
  3960. delete[] temp_tensor_extras;
  3961. }
  3962. ggml_tensor_extra_gpu * ggml_vk_alloc_temp_tensor_extra() {
  3963. if (temp_tensor_extras == nullptr) {
  3964. temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_VK_MAX_NODES];
  3965. }
  3966. size_t alloc_index = temp_tensor_extra_index;
  3967. temp_tensor_extra_index = (temp_tensor_extra_index + 1) % GGML_VK_MAX_NODES;
  3968. ggml_tensor_extra_gpu * extra = &temp_tensor_extras[alloc_index];
  3969. extra->reset();
  3970. return extra;
  3971. }
  3972. };
  3973. GGML_CALL static const char * ggml_backend_vk_buffer_get_name(ggml_backend_buffer_t buffer) {
  3974. ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  3975. return ctx->name.c_str();
  3976. }
  3977. GGML_CALL static bool ggml_backend_buffer_is_vk(ggml_backend_buffer_t buffer) {
  3978. return buffer->iface.get_name == ggml_backend_vk_buffer_get_name;
  3979. }
  3980. GGML_CALL static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) {
  3981. #ifdef GGML_VULKAN_DEBUG
  3982. std::cerr << "ggml_backend_vk_buffer_free_buffer()" << std::endl;
  3983. #endif
  3984. ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  3985. ggml_vk_destroy_buffer(ctx->dev_buffer);
  3986. delete ctx;
  3987. }
  3988. GGML_CALL static void * ggml_backend_vk_buffer_get_base(ggml_backend_buffer_t buffer) {
  3989. return vk_ptr_base;
  3990. UNUSED(buffer);
  3991. }
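// Assign a tensor extra: views share the parent's GPU buffer with an adjusted offset,
// other tensors get this buffer and an offset derived from the fake vk_ptr_base-relative
// data pointer.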
  3992. GGML_CALL static void ggml_backend_vk_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
  3993. #ifdef GGML_VULKAN_DEBUG
  3994. std::cerr << "ggml_backend_vk_buffer_init_tensor(" << buffer << " (" << buffer->context << "), " << tensor << ")" << std::endl;
  3995. #endif
  3996. ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  3997. ggml_tensor_extra_gpu * extra = ctx->ggml_vk_alloc_temp_tensor_extra();
  3998. if (tensor->view_src != nullptr && tensor->view_src->extra != nullptr) {
  3999. GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
  4000. ggml_tensor_extra_gpu * extra_view = (ggml_tensor_extra_gpu *) tensor->view_src->extra;
  4001. extra->buffer_gpu = extra_view->buffer_gpu;
  4002. extra->offset = extra_view->offset + tensor->view_offs;
  4003. } else {
  4004. extra->buffer_gpu = ctx->dev_buffer;
  4005. extra->offset = (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
  4006. }
  4007. tensor->backend = GGML_BACKEND_TYPE_GPU;
  4008. tensor->extra = extra;
  4009. }
  4010. GGML_CALL static void ggml_backend_vk_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
  4011. #ifdef GGML_VULKAN_DEBUG
  4012. std::cerr << "ggml_backend_vk_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")" << std::endl;
  4013. #endif
  4014. GGML_ASSERT(tensor->backend == GGML_BACKEND_TYPE_GPU);
  4015. ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  4016. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
  4017. vk_buffer buf = extra->buffer_gpu.lock();
  4018. ggml_vk_buffer_write(ctx->ctx, buf, extra->offset + offset, data, size);
  4019. }
  4020. GGML_CALL static void ggml_backend_vk_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
  4021. #ifdef GGML_VULKAN_DEBUG
  4022. std::cerr << "ggml_backend_vk_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")" << std::endl;
  4023. #endif
  4024. GGML_ASSERT(tensor->backend == GGML_BACKEND_TYPE_GPU);
  4025. ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  4026. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
  4027. vk_buffer buf = extra->buffer_gpu.lock();
  4028. ggml_vk_buffer_read(ctx->ctx, buf, extra->offset + offset, data, size);
  4029. }
  4030. GGML_CALL static bool ggml_backend_vk_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
  4031. if (ggml_backend_buffer_is_vk(src->buffer)) {
  4032. ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  4033. ggml_tensor_extra_gpu * src_extra = (ggml_tensor_extra_gpu *) src->extra;
  4034. ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
  4035. vk_buffer src_buf = src_extra->buffer_gpu.lock();
  4036. vk_buffer dst_buf = dst_extra->buffer_gpu.lock();
  4037. ggml_vk_buffer_copy(dst_buf, dst_extra->offset, src_buf, src_extra->offset, ggml_nbytes(src));
  4038. return true;
  4039. }
  4040. return false;
  4041. }
  4042. GGML_CALL static void ggml_backend_vk_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
  4043. ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  4044. ggml_vk_buffer_memset(ctx->ctx, ctx->dev_buffer, 0, value, buffer->size);
  4045. }
  4046. static ggml_backend_buffer_i ggml_backend_vk_buffer_interface = {
  4047. /* .get_name = */ ggml_backend_vk_buffer_get_name,
  4048. /* .free_buffer = */ ggml_backend_vk_buffer_free_buffer,
  4049. /* .get_base = */ ggml_backend_vk_buffer_get_base,
  4050. /* .init_tensor = */ ggml_backend_vk_buffer_init_tensor,
  4051. /* .set_tensor = */ ggml_backend_vk_buffer_set_tensor,
  4052. /* .get_tensor = */ ggml_backend_vk_buffer_get_tensor,
  4053. /* .cpy_tensor = */ ggml_backend_vk_buffer_cpy_tensor,
  4054. /* .clear = */ ggml_backend_vk_buffer_clear,
  4055. /* .reset = */ NULL,
  4056. };
  4057. // vk buffer type
  4058. struct ggml_backend_vk_buffer_type_context {
  4059. std::string name;
  4060. ggml_backend_vk_context * ctx;
  4061. };
  4062. GGML_CALL static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft) {
  4063. ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *)buft->context;
  4064. return ctx->name.c_str();
  4065. }
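// Allocate a device-local buffer of the requested size and wrap it in a
// ggml_backend_vk_buffer_context.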
  4066. GGML_CALL static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
  4067. #ifdef GGML_VULKAN_DEBUG
  4068. std::cerr << "ggml_backend_vk_buffer_type_alloc_buffer(" << size << ")" << std::endl;
  4069. #endif
  4070. ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
  4071. vk_buffer dev_buffer = ggml_vk_create_buffer_device(ctx->ctx, size);
  4072. ggml_backend_vk_buffer_context * bufctx = new ggml_backend_vk_buffer_context(ctx->ctx, std::move(dev_buffer), ctx->name);
  4073. return ggml_backend_buffer_init(buft, ggml_backend_vk_buffer_interface, bufctx, size);
  4074. }
  4075. GGML_CALL static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
  4076. ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
  4077. return ctx->ctx->device.lock()->properties.limits.minStorageBufferOffsetAlignment;
  4078. }
  4079. GGML_CALL static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
  4080. ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
  4081. return ctx->ctx->device.lock()->max_memory_allocation_size;
  4082. }
  4083. GGML_CALL static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
  4084. return ggml_nbytes(tensor);
  4085. UNUSED(buft);
  4086. }
  4087. GGML_CALL static bool ggml_backend_vk_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
  4088. if (!ggml_backend_is_vk(backend)) {
  4089. return false;
  4090. }
  4091. ggml_backend_vk_buffer_type_context * buft_ctx = (ggml_backend_vk_buffer_type_context *)buft->context;
  4092. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4093. return buft_ctx->ctx->idx == ctx->idx;
  4094. }
  4095. static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
  4096. /* .get_name = */ ggml_backend_vk_buffer_type_name,
  4097. /* .alloc_buffer = */ ggml_backend_vk_buffer_type_alloc_buffer,
  4098. /* .get_alignment = */ ggml_backend_vk_buffer_type_get_alignment,
  4099. /* .get_max_size = */ ggml_backend_vk_buffer_type_get_max_size,
  4100. /* .get_alloc_size = */ ggml_backend_vk_buffer_type_get_alloc_size,
  4101. /* .supports_backend = */ ggml_backend_vk_buffer_type_supports_backend,
  4102. /* .is_host = */ NULL,
  4103. };
  4104. GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t idx) {
  4105. #ifdef GGML_VULKAN_DEBUG
  4106. std::cerr << "ggml_backend_vk_buffer_type(" << idx << ")" << std::endl;
  4107. #endif
  4108. GGML_ASSERT(idx < vk_instance.device_indices.size());
  4109. ggml_backend_vk_init(idx);
  4110. return &vk_instance.buffer_types[idx];
  4111. }
  4112. // host buffer type
  4113. GGML_CALL static const char * ggml_backend_vk_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
  4114. return GGML_VK_NAME "_Host";
  4115. UNUSED(buft);
  4116. }
  4117. GGML_CALL static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffer) {
  4118. return GGML_VK_NAME "_Host";
  4119. UNUSED(buffer);
  4120. }
  4121. GGML_CALL static void ggml_backend_vk_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
  4122. #ifdef GGML_VULKAN_DEBUG
  4123. std::cerr << "ggml_backend_vk_host_buffer_free_buffer()" << std::endl;
  4124. #endif
  4125. ggml_vk_host_free(&vk_instance.contexts[0], buffer->context);
  4126. }
  4127. GGML_CALL static ggml_backend_buffer_t ggml_backend_vk_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
  4128. #ifdef GGML_VULKAN_DEBUG
  4129. std::cerr << "ggml_backend_vk_host_buffer_type_alloc_buffer(" << size << ")" << std::endl;
  4130. #endif
  4131. void * ptr = nullptr;
  4132. try {
  4133. ptr = ggml_vk_host_malloc(&vk_instance.contexts[0], size);
  4134. } catch (vk::SystemError& e) {
  4135. std::cerr << "ggml_vulkan: Failed to allocate pinned memory." << std::endl;
  4136. std::cerr << "ggml_vulkan: " << e.what() << std::endl;
  4137. // fallback to cpu buffer
  4138. return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
  4139. }
  4140. ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
  4141. buffer->buft = buft;
  4142. buffer->iface.get_name = ggml_backend_vk_host_buffer_name;
  4143. buffer->iface.free_buffer = ggml_backend_vk_host_buffer_free_buffer;
  4144. return buffer;
  4145. }
  4146. GGML_CALL static size_t ggml_backend_vk_host_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
  4147. return vk_instance.contexts[0].device.lock()->properties.limits.minMemoryMapAlignment;
  4148. UNUSED(buft);
  4149. }
  4150. GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() {
  4151. static struct ggml_backend_buffer_type ggml_backend_vk_buffer_type_host = {
  4152. /* .iface = */ {
  4153. /* .get_name = */ ggml_backend_vk_host_buffer_type_name,
  4154. /* .alloc_buffer = */ ggml_backend_vk_host_buffer_type_alloc_buffer,
  4155. /* .get_alignment = */ ggml_backend_vk_host_buffer_type_get_alignment,
  4156. /* .get_max_size = */ NULL, // defaults to SIZE_MAX
  4157. /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
  4158. /* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend,
  4159. /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
  4160. },
  4161. /* .context = */ nullptr,
  4162. };
  4163. if (!vk_instance.contexts[0].initialized) {
  4164. // Fall back to CPU
  4165. return ggml_backend_cpu_buffer_type();
  4166. }
  4167. return &ggml_backend_vk_buffer_type_host;
  4168. }
  4169. // backend
  4170. GGML_CALL static const char * ggml_backend_vk_name(ggml_backend_t backend) {
  4171. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4172. return ctx->name.c_str();
  4173. }
  4174. GGML_CALL static void ggml_backend_vk_free(ggml_backend_t backend) {
  4175. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4176. #ifdef GGML_VULKAN_DEBUG
  4177. std::cerr << "ggml_backend_vk_free(" << ctx->name << ")" << std::endl;
  4178. #endif
  4179. size_t idx = ctx->idx;
  4180. ggml_vk_cleanup(ctx);
  4181. // Release device
  4182. vk_instance.devices[ctx->idx].reset();
  4183. ctx->initialized = false;
  4184. vk_instance.initialized[idx] = false;
  4185. vk_instance.backends[idx] = nullptr;
  4186. memset(&vk_instance.buffer_types[idx], 0, sizeof(ggml_backend_buffer_type));
  4187. delete backend;
  4188. }
  4189. GGML_CALL static ggml_backend_buffer_type_t ggml_backend_vk_get_default_buffer_type(ggml_backend_t backend) {
  4190. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4191. GGML_ASSERT(ctx->initialized);
  4192. return ggml_backend_vk_buffer_type(ctx->idx);
  4193. }
  4194. GGML_CALL static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
  4195. #ifdef GGML_VULKAN_DEBUG
  4196. std::cerr << "ggml_backend_vk_set_tensor_async(" << size << ")" << std::endl;
  4197. #endif
  4198. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4199. GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_buffer_type(ctx->idx) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");
  4200. GGML_ASSERT(tensor->backend == GGML_BACKEND_TYPE_GPU);
  4201. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
  4202. if (ctx->transfer_ctx == nullptr) {
  4203. // Initialize new transfer context
  4204. ctx->transfer_ctx = ggml_vk_create_context(ctx, ctx->device.lock()->transfer_queue);
  4205. ggml_vk_ctx_begin(ctx, ctx->transfer_ctx);
  4206. }
  4207. vk_buffer buf = extra->buffer_gpu.lock();
  4208. ggml_vk_buffer_write_async(ctx, ctx->transfer_ctx, buf, extra->offset + offset, data, size);
  4209. }
  4210. GGML_CALL static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
  4211. #ifdef GGML_VULKAN_DEBUG
  4212. std::cerr << "ggml_backend_vk_get_tensor_async(" << size << ")" << std::endl;
  4213. #endif
  4214. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4215. GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_buffer_type(ctx->idx) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");
  4216. GGML_ASSERT(tensor->backend == GGML_BACKEND_TYPE_GPU);
  4217. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
  4218. if (ctx->transfer_ctx == nullptr) {
  4219. // Initialize new transfer context
  4220. ctx->transfer_ctx = ggml_vk_create_context(ctx, ctx->device.lock()->transfer_queue);
  4221. ggml_vk_ctx_begin(ctx, ctx->transfer_ctx);
  4222. }
  4223. vk_buffer buf = extra->buffer_gpu.lock();
  4224. ggml_vk_buffer_read_async(ctx, ctx->transfer_ctx, buf, extra->offset + offset, data, size);
  4225. }
  4226. GGML_CALL static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
  4227. #ifdef GGML_VULKAN_DEBUG
  4228. std::cerr << "ggml_backend_vk_cpy_tensor_async()" << std::endl;
  4229. #endif
  4230. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4231. if ((dst->buffer->buft == ggml_backend_vk_buffer_type(ctx->idx) || dst->buffer->buft == ggml_backend_vk_host_buffer_type()) && ggml_backend_buffer_is_vk(src->buffer)) {
  4232. ggml_tensor_extra_gpu * src_extra = (ggml_tensor_extra_gpu *) src->extra;
  4233. ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
  4234. if (ctx->transfer_ctx == nullptr) {
  4235. // Initialize new transfer context
  4236. ctx->transfer_ctx = ggml_vk_create_context(ctx, ctx->device.lock()->transfer_queue);
  4237. ggml_vk_ctx_begin(ctx, ctx->transfer_ctx);
  4238. }
  4239. vk_buffer src_buf = src_extra->buffer_gpu.lock();
  4240. vk_buffer dst_buf = dst_extra->buffer_gpu.lock();
  4241. ggml_vk_buffer_copy_async(ctx->transfer_ctx, src_buf, src_extra->offset, dst_buf, dst_extra->offset, ggml_nbytes(src));
  4242. return true;
  4243. }
  4244. return false;
  4245. }
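// Flush the pending transfer context: perform the staged host-side copies, submit,
// wait on the fence, then run the queued device-to-host copies.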
  4246. GGML_CALL static void ggml_backend_vk_synchronize(ggml_backend_t backend) {
  4247. #ifdef GGML_VULKAN_DEBUG
  4248. std::cerr << "ggml_backend_vk_synchronize()" << std::endl;
  4249. #endif
  4250. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4251. if(ctx->transfer_ctx == nullptr) {
  4252. return;
  4253. }
  4254. ggml_vk_ctx_end(ctx->transfer_ctx);
  4255. for (auto& cpy : ctx->transfer_ctx->in_memcpys) {
  4256. memcpy(cpy.dst, cpy.src, cpy.n);
  4257. }
  4258. ggml_vk_submit(ctx->transfer_ctx, ctx->fence);
  4259. VK_CHECK(ctx->device.lock()->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_backend_vk_synchronize waitForFences");
  4260. ctx->device.lock()->device.resetFences({ ctx->fence });
  4261. for (auto& cpy : ctx->transfer_ctx->out_memcpys) {
  4262. memcpy(cpy.dst, cpy.src, cpy.n);
  4263. }
  4264. ctx->transfer_ctx = nullptr;
  4265. }
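// Per-graph execution: size the preallocated buffers for every node, record the Vulkan
// command contexts, run each node (skipping no-op layout ops), then clean up.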
  4266. GGML_CALL static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
  4267. ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
  4268. for (int i = 0; i < cgraph->n_nodes; i++) {
  4269. ggml_vk_preallocate_buffers_graph(ctx, cgraph->nodes[i]);
  4270. }
  4271. ggml_vk_preallocate_buffers(ctx);
  4272. int last_node = cgraph->n_nodes - 1;
  4273. // If the last op in the cgraph isn't backend GPU, the command buffer doesn't get closed properly
  4274. while (last_node > 0 && cgraph->nodes[last_node]->backend != GGML_BACKEND_TYPE_GPU) {
  4275. last_node -= 1;
  4276. }
  4277. for (int i = 0; i < cgraph->n_nodes; i++) {
  4278. ggml_vk_build_graph(ctx,cgraph->nodes[i], i == last_node);
  4279. }
  4280. ggml_compute_params params = {};
  4281. params.type = GGML_TASK_TYPE_COMPUTE;
  4282. params.ith = 0;
  4283. for (int i = 0; i < cgraph->n_nodes; i++) {
  4284. ggml_tensor * node = cgraph->nodes[i];
  4285. if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
  4286. continue;
  4287. }
  4288. bool ok = ggml_vk_compute_forward(ctx, &params, node);
  4289. if (!ok) {
  4290. fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
  4291. }
  4292. #ifdef GGML_VULKAN_CHECK_RESULTS
  4293. else {
  4294. ggml_vk_check_results_1(ctx, &params, node);
  4295. }
  4296. #endif
  4297. GGML_ASSERT(ok);
  4298. }
  4299. ggml_vk_graph_cleanup(ctx);
  4300. return GGML_STATUS_SUCCESS;
  4301. UNUSED(backend);
  4302. }
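// Report which ops this backend can run, so callers know what can be offloaded.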
  4303. GGML_CALL static bool ggml_backend_vk_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
  4304. switch (op->op) {
  4305. case GGML_OP_UNARY:
  4306. switch (ggml_get_unary_op(op)) {
  4307. case GGML_UNARY_OP_GELU:
  4308. case GGML_UNARY_OP_SILU:
  4309. case GGML_UNARY_OP_RELU:
  4310. return true;
  4311. default:
  4312. return false;
  4313. }
  4314. break;
  4315. case GGML_OP_MUL_MAT:
  4316. {
  4317. struct ggml_tensor * a;
  4318. struct ggml_tensor * b;
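// Only GGML_OP_MUL_MAT reaches this case, so the else branch below (which would handle
// an indirect matmul variant such as GGML_OP_MUL_MAT_ID) is currently unused.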
  4319. if (op->op == GGML_OP_MUL_MAT) {
  4320. a = op->src[0];
  4321. b = op->src[1];
  4322. } else {
  4323. a = op->src[2];
  4324. b = op->src[1];
  4325. }
  4326. if (a->ne[3] != b->ne[3]) {
  4327. return false;
  4328. }
  4329. return true;
  4330. } break;
  4331. // case GGML_OP_GET_ROWS:
  4332. // {
  4333. // switch (op->src[0]->type) {
  4334. // case GGML_TYPE_F16:
  4335. // case GGML_TYPE_F32:
  4336. // case GGML_TYPE_Q4_0:
  4337. // case GGML_TYPE_Q4_1:
  4338. // case GGML_TYPE_Q5_0:
  4339. // case GGML_TYPE_Q5_1:
  4340. // case GGML_TYPE_Q8_0:
  4341. // return true;
  4342. // default:
  4343. // return false;
  4344. // }
  4345. // } break;
  4346. case GGML_OP_CPY:
  4347. {
  4348. ggml_type src0_type = op->src[0]->type;
  4349. ggml_type src1_type = op->src[1]->type;
  4350. if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
  4351. return true;
  4352. }
  4353. if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
  4354. return true;
  4355. }
  4356. if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
  4357. return true;
  4358. }
  4359. return false;
  4360. } break;
  4361. case GGML_OP_DUP:
  4362. // case GGML_OP_REPEAT:
  4363. // {
  4364. // ggml_type src0_type = op->src[0]->type;
  4365. // return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16;
  4366. // } break;
  4367. case GGML_OP_ROPE:
  4368. {
  4369. const int mode = ((const int32_t *) op->op_params)[2];
  4370. const bool is_glm = mode & 4;
  4371. return !is_glm;
  4372. } break;
  4373. case GGML_OP_NONE:
  4374. case GGML_OP_RESHAPE:
  4375. case GGML_OP_VIEW:
  4376. case GGML_OP_PERMUTE:
  4377. case GGML_OP_TRANSPOSE:
  4378. case GGML_OP_NORM:
  4379. case GGML_OP_ADD:
  4380. case GGML_OP_MUL:
  4381. case GGML_OP_RMS_NORM:
  4382. case GGML_OP_SCALE:
  4383. case GGML_OP_SQR:
  4384. case GGML_OP_CLAMP:
  4385. case GGML_OP_CONT:
  4386. case GGML_OP_DIAG_MASK_INF:
  4387. case GGML_OP_SOFT_MAX:
  4388. return true;
  4389. default:
  4390. return false;
  4391. }
  4392. UNUSED(backend);
  4393. }
  4394. // TODO: enable async and synchronize
  4395. static ggml_backend_i ggml_backend_vk_interface = {
  4396. /* .get_name = */ ggml_backend_vk_name,
  4397. /* .free = */ ggml_backend_vk_free,
  4398. /* .get_default_buffer_type = */ ggml_backend_vk_get_default_buffer_type,
  4399. /* .set_tensor_async = */ NULL, // ggml_backend_vk_set_tensor_async,
  4400. /* .get_tensor_async = */ NULL, // ggml_backend_vk_get_tensor_async,
  4401. /* .cpy_tensor_async = */ NULL, // ggml_backend_vk_cpy_tensor_async,
  4402. /* .synchronize = */ NULL, // ggml_backend_vk_synchronize,
  4403. /* .graph_plan_create = */ NULL,
  4404. /* .graph_plan_free = */ NULL,
  4405. /* .graph_plan_compute = */ NULL,
  4406. /* .graph_compute = */ ggml_backend_vk_graph_compute,
  4407. /* .supports_op = */ ggml_backend_vk_supports_op,
  4408. };
  4409. static ggml_guid_t ggml_backend_vk_guid() {
  4410. static ggml_guid guid = { 0xb8, 0xf7, 0x4f, 0x86, 0x40, 0x3c, 0xe1, 0x02, 0x91, 0xc8, 0xdd, 0xe9, 0x02, 0x3f, 0xc0, 0x2b };
  4411. return &guid;
  4412. }
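// Initialize (or return the cached) backend for device index idx: sets up the per-device
// context, registers its buffer type, and caches the ggml_backend object.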
  4413. GGML_CALL ggml_backend_t ggml_backend_vk_init(size_t idx) {
  4414. if (vk_instance.initialized[idx]) {
  4415. return vk_instance.backends[idx];
  4416. }
  4417. #ifdef GGML_VULKAN_DEBUG
  4418. std::cerr << "ggml_backend_vk_init(" << idx << ")" << std::endl;
  4419. #endif
  4420. ggml_backend_vk_context * ctx = &vk_instance.contexts[idx];
  4421. ggml_vk_init(ctx, idx);
  4422. ctx->name = GGML_VK_NAME + std::to_string(idx);
  4423. vk_instance.buffer_types[idx] = {
  4424. /* .iface = */ ggml_backend_vk_buffer_type_interface,
  4425. /* .context = */ new ggml_backend_vk_buffer_type_context{ ctx->name, ctx },
  4426. };
  4427. vk_instance.initialized[idx] = true;
  4428. ggml_backend_t vk_backend = new ggml_backend {
  4429. /* .guid = */ ggml_backend_vk_guid(),
  4430. /* .interface = */ ggml_backend_vk_interface,
  4431. /* .context = */ &vk_instance.contexts[ctx->idx],
  4432. };
  4433. vk_instance.backends[idx] = vk_backend;
  4434. return vk_backend;
  4435. }
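// Minimal usage sketch (assumes device index 0 is valid; error handling omitted):
//   ggml_backend_t backend = ggml_backend_vk_init(0);
//   ggml_backend_buffer_type_t buft = ggml_backend_vk_buffer_type(0);
//   // ... allocate the graph's tensors from buft, then:
//   // ggml_backend_graph_compute(backend, graph);
//   ggml_backend_free(backend);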
  4436. GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend) {
  4437. return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_vk_guid());
  4438. }
  4439. GGML_CALL int ggml_backend_vk_get_device_count() {
  4440. return ggml_vk_get_device_count();
  4441. }
  4442. GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size) {
  4443. ggml_vk_get_device_description(device, description, description_size);
  4444. }
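// Report the first device-local heap's size as both total and free memory; no attempt
// is made to query actual usage.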
  4445. GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total) {
  4446. GGML_ASSERT(device < (int) vk_instance.device_indices.size());
  4447. vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];
  4448. vk::PhysicalDeviceMemoryProperties memprops = vkdev.getMemoryProperties();
  4449. for (const vk::MemoryHeap& heap : memprops.memoryHeaps) {
  4450. if (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal) {
  4451. *total = heap.size;
  4452. *free = heap.size;
  4453. break;
  4454. }
  4455. }
  4456. }
  4457. // backend registry
  4458. GGML_CALL static ggml_backend_t ggml_backend_reg_vk_init(const char * params, void * user_data) {
  4459. ggml_backend_t vk_backend = ggml_backend_vk_init((int) (intptr_t) user_data);
  4460. return vk_backend;
  4461. UNUSED(params);
  4462. }
  4463. extern "C" GGML_CALL int ggml_backend_vk_reg_devices();
  4464. GGML_CALL int ggml_backend_vk_reg_devices() {
  4465. for (auto idx : vk_instance.device_indices) {
  4466. char name[128];
4467. snprintf(name, sizeof(name), "%s%zu", GGML_VK_NAME, idx);
  4468. ggml_backend_register(name, ggml_backend_reg_vk_init, ggml_backend_vk_buffer_type(idx), (void *) (intptr_t) idx);
  4469. }
  4470. return vk_instance.device_indices.size();
  4471. }
  4472. // Extension availability
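// FIXME(?): the validation helper below appears to check for VK_KHR_portability_enumeration
// (and warns about it), seemingly a copy of the portability helper that follows rather than
// a check for a validation-related extension.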
  4473. static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
  4474. #ifdef GGML_VULKAN_VALIDATE
  4475. bool portability_enumeration_ext = false;
  4476. // Check for portability enumeration extension for MoltenVK support
  4477. for (const auto& properties : instance_extensions) {
  4478. if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
  4479. return true;
  4480. }
  4481. }
  4482. if (!portability_enumeration_ext) {
  4483. std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
  4484. }
  4485. #endif
  4486. return false;
  4487. UNUSED(instance_extensions);
  4488. }
  4489. static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
  4490. #ifdef __APPLE__
  4491. bool portability_enumeration_ext = false;
  4492. // Check for portability enumeration extension for MoltenVK support
  4493. for (const auto& properties : instance_extensions) {
  4494. if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
  4495. return true;
  4496. }
  4497. }
  4498. if (!portability_enumeration_ext) {
  4499. std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
  4500. }
  4501. #endif
  4502. return false;
  4503. UNUSED(instance_extensions);
  4504. }
  4505. // checks
  4506. #ifdef GGML_VULKAN_CHECK_RESULTS
  4507. static void ggml_vk_print_graph_origin(const ggml_tensor * tensor, std::vector<const ggml_tensor *>& done, int level = 0) {
  4508. if (std::find(done.begin(), done.end(), tensor) != done.end() || level > 10) {
  4509. return;
  4510. }
  4511. for (int j = 0; j < level; j++) {
  4512. std::cerr << " ";
  4513. }
  4514. std::cerr << ggml_op_name(tensor->op) << " gpu=" << (tensor->extra != nullptr) << " backend=" << tensor->backend << std::endl;
  4515. done.push_back(tensor);
  4516. for (int i = 0; i < GGML_MAX_SRC; i++) {
  4517. if (tensor->src[i] != nullptr) {
  4518. ggml_vk_print_graph_origin(tensor->src[i], done, level + 1);
  4519. }
  4520. }
  4521. }
  4522. static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, const void * data, int i0, int i1, int i2, int i3) {
  4523. if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
  4524. return;
  4525. }
  4526. i0 = std::max(i0, 5);
  4527. i1 = std::max(i1, 5);
  4528. i2 = std::max(i2, 0);
  4529. i3 = std::max(i3, 0);
  4530. fprintf(stderr, " ");
  4531. for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
  4532. fprintf(stderr, "%7d ", idx1);
  4533. }
  4534. fprintf(stderr, "\n");
  4535. for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
  4536. fprintf(stderr, "%7d: ", idx0);
  4537. for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
  4538. if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
  4539. float val;
  4540. if (tensor->type == GGML_TYPE_F32) {
  4541. val = *(const float *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
  4542. } else if (tensor->type == GGML_TYPE_F16) {
  4543. val = ggml_fp16_to_fp32(*(const ggml_fp16_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
  4544. }
  4545. fprintf(stderr, "% 7.2f ", val);
  4546. } else {
  4547. fprintf(stderr, " ");
  4548. }
  4549. }
  4550. fprintf(stderr, "\n");
  4551. }
  4552. }
  4553. static void ggml_vk_print_tensor(ggml_backend_vk_context * ctx, const ggml_tensor * tensor, const char * name) {
  4554. void * tensor_data = tensor->data;
  4555. if (tensor->backend == GGML_BACKEND_TYPE_GPU) {
  4556. const size_t tensor_size = ggml_nbytes(tensor);
  4557. tensor_data = malloc(tensor_size);
  4558. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
  4559. vk_buffer buffer_gpu = extra->buffer_gpu.lock();
  4560. ggml_vk_buffer_read(ctx, buffer_gpu, extra->offset, tensor_data, tensor_size);
  4561. }
  4562. std::cerr << "TENSOR CHECK " << name << " (" << tensor->name << "): " << ggml_op_name(tensor->op) << std::endl;
  4563. std::cerr << "tensor=" << tensor << " tensor->backend: " << tensor->backend << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << std::endl;
  4564. if (tensor->src[0] != nullptr) {
  4565. std::cerr << "tensor->src[0]=" << tensor->src[0] << " name=" << tensor->src[0]->name << " op=" << ggml_op_name(tensor->src[0]->op) << " type=" << ggml_type_name(tensor->src[0]->type) << " backend=" << tensor->src[0]->backend << " ne0=" << tensor->src[0]->ne[0] << " nb0=" << tensor->src[0]->nb[0] << " ne1=" << tensor->src[0]->ne[1] << " nb1=" << tensor->src[0]->nb[1] << " ne2=" << tensor->src[0]->ne[2] << " nb2=" << tensor->src[0]->nb[2] << " ne3=" << tensor->src[0]->ne[3] << " nb3=" << tensor->src[0]->nb[3] << std::endl;
  4566. }
  4567. if (tensor->src[1] != nullptr) {
  4568. std::cerr << "tensor->src[1]=" << tensor->src[1] << " name=" << tensor->src[1]->name << " op=" << ggml_op_name(tensor->src[1]->op) << " type=" << ggml_type_name(tensor->src[1]->type) << " backend=" << tensor->src[1]->backend << " ne0=" << tensor->src[1]->ne[0] << " nb0=" << tensor->src[1]->nb[0] << " ne1=" << tensor->src[1]->ne[1] << " nb1=" << tensor->src[1]->nb[1] << " ne2=" << tensor->src[1]->ne[2] << " nb2=" << tensor->src[1]->nb[2] << " ne3=" << tensor->src[1]->ne[3] << " nb3=" << tensor->src[1]->nb[3] << std::endl;
  4569. }
  4570. std::cerr << std::endl << "Result:" << std::endl;
  4571. ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
  4572. std::cerr << std::endl;
  4573. std::cerr << std::endl << "Result:" << std::endl;
  4574. ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 1, 0);
  4575. std::cerr << std::endl;
  4576. std::vector<const ggml_tensor *> done;
  4577. ggml_vk_print_graph_origin(tensor, done);
  4578. if (tensor->backend == GGML_BACKEND_TYPE_GPU) {
  4579. free(tensor_data);
  4580. }
  4581. }
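// NOTE: the early return at the top of this function disables the per-element NaN scan
// below; it is presumably kept around for manual debugging.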
  4582. static void ggml_vk_check_tensor(const std::string& name, const ggml_tensor * tensor) {
  4583. return;
  4584. GGML_ASSERT(tensor->backend == GGML_BACKEND_TYPE_CPU);
  4585. if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
  4586. return;
  4587. }
  4588. for (int i3 = 0; i3 < tensor->ne[3]; i3++) {
  4589. for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
  4590. for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
  4591. for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
  4592. float val = 0.0f;
  4593. if (tensor->type == GGML_TYPE_F32) {
  4594. val = *(float *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
  4595. } else if (tensor->type == GGML_TYPE_F16) {
  4596. val = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
  4597. }
  4598. if (std::isnan(val)) {
  4599. std::cerr << "ERROR: TENSOR CHECK " << name << ": Invalid value in " << ggml_op_name(tensor->op) << " i3=" << i3 << " i2=" << i2 << " i1=" << i1 << " i0=" << i0 << " val=" << val << std::endl;
  4600. std::cerr << "tensor=" << tensor << " tensor->type=" << ggml_type_name(tensor->type) << " tensor->backend: " << tensor->backend << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << std::endl;
  4601. std::cerr << std::endl;
  4602. ggml_vk_print_tensor_area(tensor, tensor->data, i0, i1, i2, i3);
  4603. std::cerr << std::endl;
  4604. std::vector<const ggml_tensor *> done;
  4605. ggml_vk_print_graph_origin(tensor, done);
  4606. GGML_ASSERT(false);
  4607. }
  4608. }
  4609. }
  4610. }
  4611. }
  4612. }
  4613. void * comp_result;
  4614. size_t comp_size;
  4615. size_t comp_nb[GGML_MAX_DIMS];
  4616. size_t check_counter = 0;
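// Runs before the Vulkan op executes: clones src0/src1 (reading GPU data back if needed),
// recomputes the op on the CPU via ggml_graph_compute_with_ctx, and stores the reference
// result in comp_result/comp_nb/comp_size for ggml_vk_check_results_1.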
  4617. static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor) {
  4618. if (params->ith != 0) {
  4619. return;
  4620. }
  4621. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE || tensor->op == GGML_OP_TRANSPOSE) {
  4622. return;
  4623. }
  4624. check_counter++;
  4625. if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
  4626. return;
  4627. }
  4628. ggml_tensor * src0 = tensor->src[0];
  4629. ggml_tensor * src1 = tensor->src[1];
  4630. struct ggml_init_params iparams = {
  4631. /*.mem_size =*/ 1024*1024*1024,
  4632. /*.mem_buffer =*/ NULL,
  4633. /*.no_alloc =*/ false,
  4634. };
  4635. struct ggml_context * ggml_ctx = ggml_init(iparams);
  4636. struct ggml_tensor * src0_clone = nullptr;
  4637. struct ggml_tensor * src1_clone = nullptr;
  4638. struct ggml_tensor * tensor_clone = nullptr;
  4639. size_t src0_size;
  4640. size_t src1_size;
  4641. void * src0_buffer;
  4642. void * src1_buffer;
  4643. if (src0 != nullptr) {
  4644. src0_clone = ggml_dup_tensor(ggml_ctx, src0);
  4645. src0_size = ggml_nbytes(src0);
  4646. src0_buffer = malloc(src0_size);
  4647. src0_clone->data = src0_buffer;
  4648. if (src0->backend == GGML_BACKEND_TYPE_CPU) {
  4649. memcpy(src0_clone->data, src0->data, src0_size);
  4650. memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
  4651. } else if (src0->backend == GGML_BACKEND_TYPE_GPU) {
  4652. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src0->extra;
  4653. uint64_t offset = extra->offset;
  4654. if (!ggml_is_contiguous(src0) && ggml_vk_dim01_contiguous(src0)) {
  4655. for (int i3 = 0; i3 < src0->ne[3]; i3++) {
  4656. for (int i2 = 0; i2 < src0->ne[2]; i2++) {
  4657. const int idx = i3*src0->ne[2] + i2;
  4658. vk_buffer buffer_gpu = extra->buffer_gpu.lock();
  4659. ggml_vk_buffer_read(ctx, buffer_gpu, offset + idx * src0->nb[2], ((char *)src0_clone->data + idx * src0_clone->nb[2]), src0->ne[1] * src0->nb[1]);
  4660. }
  4661. }
  4662. src0_clone->nb[0] = src0->nb[0];
  4663. src0_clone->nb[1] = src0->nb[1];
  4664. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  4665. src0_clone->nb[i] = src0_clone->nb[i - 1]*src0_clone->ne[i - 1];
  4666. }
  4667. } else {
  4668. vk_buffer buffer_gpu = extra->buffer_gpu.lock();
  4669. if (offset + src0_size >= buffer_gpu->size) {
  4670. src0_size = buffer_gpu->size - offset;
  4671. }
  4672. ggml_vk_buffer_read(ctx, buffer_gpu, offset, src0_clone->data, src0_size);
  4673. memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
  4674. }
  4675. } else {
  4676. GGML_ASSERT(false);
  4677. }
  4678. if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
  4679. ggml_vk_print_tensor(ctx, src0, "src0");
  4680. }
  4681. ggml_vk_check_tensor(std::string(ggml_op_name(tensor->op)) + "->src0", src0_clone);
  4682. }
  4683. if (src1 != nullptr) {
  4684. src1_clone = ggml_dup_tensor(ggml_ctx, src1);
  4685. src1_size = ggml_nbytes(src1);
  4686. src1_buffer = malloc(src1_size);
  4687. src1_clone->data = src1_buffer;
  4688. if (src1->backend == GGML_BACKEND_TYPE_CPU) {
  4689. memcpy(src1_clone->data, src1->data, src1_size);
  4690. memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
  4691. } else if (src1->backend == GGML_BACKEND_TYPE_GPU) {
  4692. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src1->extra;
  4693. uint64_t offset = extra->offset;
  4694. if (!ggml_is_contiguous(src1) && ggml_vk_dim01_contiguous(src1)) {
  4695. for (int i3 = 0; i3 < src1->ne[3]; i3++) {
  4696. for (int i2 = 0; i2 < src1->ne[2]; i2++) {
  4697. const int idx = i3*src1->ne[2] + i2;
  4698. vk_buffer buffer_gpu = extra->buffer_gpu.lock();
  4699. ggml_vk_buffer_read(ctx, buffer_gpu, offset + idx * src1->nb[2], ((char *)src1_clone->data + idx * src1_clone->nb[2]), src1->ne[1] * src1->nb[1]);
  4700. }
  4701. }
  4702. src1_clone->nb[0] = src1->nb[0];
  4703. src1_clone->nb[1] = src1->nb[1];
  4704. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  4705. src1_clone->nb[i] = src1_clone->nb[i - 1]*src1_clone->ne[i - 1];
  4706. }
  4707. } else {
  4708. vk_buffer buffer_gpu = extra->buffer_gpu.lock();
  4709. if (offset + src1_size >= buffer_gpu->size) {
  4710. src1_size = buffer_gpu->size - offset;
  4711. }
  4712. ggml_vk_buffer_read(ctx, buffer_gpu, offset, src1_clone->data, src1_size);
  4713. memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
  4714. }
  4715. } else {
  4716. GGML_ASSERT(false);
  4717. }
  4718. if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
  4719. ggml_vk_print_tensor(ctx, src1, "src1");
  4720. std::cerr << "TENSOR CHECK: " << ggml_op_name(src1_clone->op) << " (check " << check_counter << ")" << std::endl;
  4721. std::cerr << "src1_clone=" << tensor << " src1_clone->backend: " << src1_clone->backend << " src1_clone->type: " << ggml_type_name(src1_clone->type) << " ne0=" << src1_clone->ne[0] << " nb0=" << src1_clone->nb[0] << " ne1=" << src1_clone->ne[1] << " nb1=" << src1_clone->nb[1] << " ne2=" << src1_clone->ne[2] << " nb2=" << src1_clone->nb[2] << " ne3=" << src1_clone->ne[3] << " nb3=" << src1_clone->nb[3] << std::endl;
  4722. if (src1->src[0] != nullptr) {
  4723. std::cerr << "src1->src[0]=" << src1->src[0] << " op=" << ggml_op_name(src1->src[0]->op) << " type=" << ggml_type_name(src1->src[0]->type) << " backend=" << src1->src[0]->backend << " ne0=" << src1->src[0]->ne[0] << " nb0=" << src1->src[0]->nb[0] << " ne1=" << src1->src[0]->ne[1] << " nb1=" << src1->src[0]->nb[1] << " ne2=" << src1->src[0]->ne[2] << " nb2=" << src1->src[0]->nb[2] << " ne3=" << src1->src[0]->ne[3] << " nb3=" << src1->src[0]->nb[3] << std::endl;
  4724. }
  4725. if (src1->src[1] != nullptr) {
  4726. std::cerr << "src1->src[1]=" << src1->src[1] << " op=" << ggml_op_name(src1->src[1]->op) << " type=" << ggml_type_name(src1->src[1]->type) << " backend=" << src1->src[1]->backend << " ne0=" << src1->src[1]->ne[0] << " nb0=" << src1->src[1]->nb[0] << " ne1=" << src1->src[1]->ne[1] << " nb1=" << src1->src[1]->nb[1] << " ne2=" << src1->src[1]->ne[2] << " nb2=" << src1->src[1]->nb[2] << " ne3=" << src1->src[1]->ne[3] << " nb3=" << src1->src[1]->nb[3] << std::endl;
  4727. }
  4728. std::cerr << std::endl << "Result:" << std::endl;
  4729. ggml_vk_print_tensor_area(src1_clone, src1_clone->data, 5, 5, 0, 0);
  4730. std::cerr << std::endl;
  4731. std::cerr << std::endl << "Result:" << std::endl;
  4732. ggml_vk_print_tensor_area(src1_clone, src1_clone->data, 5, 5, 1, 0);
  4733. std::cerr << std::endl;
  4734. std::vector<const ggml_tensor *> done;
  4735. ggml_vk_print_graph_origin(src1_clone, done);
  4736. }
  4737. ggml_vk_check_tensor(std::string(ggml_op_name(tensor->op)) + "->src1", src1_clone);
  4738. }
  4739. if (tensor->op == GGML_OP_MUL_MAT) {
  4740. tensor_clone = ggml_mul_mat(ggml_ctx, src0_clone, src1_clone);
  4741. } else if (tensor->op == GGML_OP_MUL) {
  4742. tensor_clone = ggml_mul(ggml_ctx, src0_clone, src1_clone);
  4743. } else if (tensor->op == GGML_OP_SCALE) {
  4744. tensor_clone = ggml_scale(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0]);
  4745. } else if (tensor->op == GGML_OP_SQR) {
  4746. tensor_clone = ggml_sqr(ggml_ctx, src0_clone);
  4747. } else if (tensor->op == GGML_OP_CLAMP) {
  4748. tensor_clone = ggml_clamp(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
  4749. } else if (tensor->op == GGML_OP_ADD) {
  4750. tensor_clone = ggml_add(ggml_ctx, src0_clone, src1_clone);
  4751. } else if (tensor->op == GGML_OP_NORM) {
  4752. tensor_clone = ggml_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
  4753. } else if (tensor->op == GGML_OP_RMS_NORM) {
  4754. tensor_clone = ggml_rms_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
  4755. } else if (tensor->op == GGML_OP_SOFT_MAX) {
  4756. tensor_clone = ggml_soft_max(ggml_ctx, src0_clone);
  4757. } else if (tensor->op == GGML_OP_DIAG_MASK_INF) {
  4758. tensor_clone = ggml_diag_mask_inf(ggml_ctx, src0_clone, *(float *)tensor->op_params);
  4759. } else if (tensor->op == GGML_OP_ROPE) {
  4760. const int n_dims = ((int32_t *) tensor->op_params)[1];
  4761. const int mode = ((int32_t *) tensor->op_params)[2];
  4762. const int n_ggml_ctx = ((int32_t *) tensor->op_params)[3];
  4763. const int n_orig_ggml_ctx = ((int32_t *) tensor->op_params)[4];
  4764. float freq_base = ((float *) tensor->op_params)[5];
  4765. float freq_scale = ((float *) tensor->op_params)[6];
  4766. float ext_factor = ((float *) tensor->op_params)[7];
  4767. float attn_factor = ((float *) tensor->op_params)[8];
  4768. float beta_fast = ((float *) tensor->op_params)[9];
  4769. float beta_slow = ((float *) tensor->op_params)[10];
  4770. tensor_clone = ggml_rope_custom(ggml_ctx, src0_clone, src1_clone, n_dims, mode, n_ggml_ctx, n_orig_ggml_ctx, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
  4771. } else if (tensor->op == GGML_OP_UNARY) {
  4772. switch (ggml_get_unary_op(tensor)) {
  4773. case GGML_UNARY_OP_SILU:
  4774. tensor_clone = ggml_silu(ggml_ctx, src0_clone);
  4775. break;
  4776. case GGML_UNARY_OP_GELU:
  4777. tensor_clone = ggml_gelu(ggml_ctx, src0_clone);
  4778. break;
  4779. case GGML_UNARY_OP_RELU:
  4780. tensor_clone = ggml_relu(ggml_ctx, src0_clone);
  4781. break;
  4782. default:
  4783. std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
  4784. GGML_ASSERT(false);
  4785. }
  4786. } else if (tensor->op == GGML_OP_CPY || tensor->op == GGML_OP_DUP) {
  4787. if (src1 == nullptr) {
  4788. tensor_clone = ggml_dup(ggml_ctx, src0_clone);
  4789. tensor_clone->type = tensor->type;
  4790. } else {
  4791. tensor_clone = ggml_cpy(ggml_ctx, src0_clone, src1_clone);
  4792. }
  4793. } else if (tensor->op == GGML_OP_CONT) {
  4794. tensor_clone = ggml_cont_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
  4795. } else if (tensor->op == GGML_OP_RESHAPE) {
  4796. tensor_clone = ggml_reshape_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
  4797. } else if (tensor->op == GGML_OP_VIEW) {
  4798. tensor_clone = ggml_view_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->nb[1], tensor->nb[2], tensor->nb[3], ((int32_t *) tensor->op_params)[0]);
  4799. } else if (tensor->op == GGML_OP_PERMUTE) {
  4800. int32_t * params = (int32_t *)tensor->op_params;
  4801. tensor_clone = ggml_permute(ggml_ctx, src0_clone, params[0], params[1], params[2], params[3]);
  4802. } else if (tensor->op == GGML_OP_TRANSPOSE) {
  4803. tensor_clone = ggml_transpose(ggml_ctx, src0_clone);
  4804. } else {
  4805. std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
  4806. GGML_ASSERT(false);
  4807. }
  4808. // Disable vulkan here to avoid the hooks in ggml.c
  4809. ctx->disable = true;
  4810. ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
  4811. ggml_build_forward_expand(cgraph, tensor_clone);
  4812. ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 8);
  4813. ctx->disable = false;
  4814. ggml_vk_check_tensor(ggml_op_name(tensor->op), tensor_clone);
  4815. if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
  4816. ggml_vk_print_tensor(ctx, tensor_clone, "tensor_clone");
  4817. }
  4818. comp_size = ggml_nbytes(tensor_clone);
  4819. comp_result = malloc(comp_size);
  4820. memcpy(comp_result, tensor_clone->data, comp_size);
  4821. memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS);
  4822. if (src0 != nullptr) {
  4823. free(src0_buffer);
  4824. }
  4825. if (src1 != nullptr) {
  4826. free(src1_buffer);
  4827. }
  4828. ggml_free(ggml_ctx);
  4829. }
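// Runs after the Vulkan op executes: reads the GPU result back and compares it element-wise
// against the CPU reference, asserting on NaN/Inf mismatches and flagging a large average
// absolute error.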
  4830. static void ggml_vk_check_results_1(ggml_backend_vk_context * ctx, ggml_compute_params * params, ggml_tensor * tensor) {
  4831. if (params->ith != 0) {
  4832. return;
  4833. }
  4834. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE || tensor->op == GGML_OP_TRANSPOSE) {
  4835. return;
  4836. }
  4837. if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
  4838. return;
  4839. }
  4840. ggml_tensor * src0 = tensor->src[0];
  4841. ggml_tensor * src1 = tensor->src[1];
  4842. void * tensor_data = tensor->data;
  4843. if (tensor->backend == GGML_BACKEND_TYPE_GPU) {
  4844. size_t tensor_size = ggml_nbytes(tensor);
  4845. tensor_data = malloc(tensor_size);
  4846. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
  4847. vk_buffer buffer_gpu = extra->buffer_gpu.lock();
  4848. if (extra->offset + tensor_size >= buffer_gpu->size) {
  4849. tensor_size = buffer_gpu->size - (extra->offset);
  4850. }
  4851. ggml_vk_buffer_read(ctx, buffer_gpu, extra->offset, tensor_data, tensor_size);
  4852. }
  4853. float first_error_result = -1.0f;
  4854. float first_error_correct = -1.0f;
  4855. std::array<int, 4> first_error = { -1, -1, -1, -1 };
  4856. double avg_err = 0.0;
  4857. size_t counter = 0;
  4858. for (int i3 = 0; i3 < tensor->ne[3]; i3++) {
  4859. for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
  4860. for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
  4861. for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
  4862. const bool buffer_size_fit = i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0] < comp_size;
  4863. float correct = 0.0f;
  4864. float result = 0.0f;
  4865. if (buffer_size_fit) {
  4866. if (tensor->type == GGML_TYPE_F32) {
  4867. correct = *(float *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
  4868. result = *(float *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
  4869. } else if (tensor->type == GGML_TYPE_F16) {
  4870. correct = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
  4871. result = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
4872. } else {
4873. std::cerr << "Missing debug code for type " << ggml_type_name(tensor->type) << std::endl;
4874. GGML_ASSERT(false);
4875. }
4876. } else {
4877. std::cerr << "comp_size=" << comp_size << " but required is " << (i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]) << std::endl;
4878. }
                    if ((std::isnan(correct) != std::isnan(result)) || (std::isinf(correct) != std::isinf(result)) || !buffer_size_fit) {
                        std::cerr << "ERROR: Invalid value in " << ggml_op_name(tensor->op) << " i3=" << i3 << " i2=" << i2 << " i1=" << i1 << " i0=" << i0 << " result=" << result << " correct=" << correct << " avg_err=" << (avg_err / counter) << std::endl;
                        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->backend: " << tensor->backend << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
                        if (src0 != nullptr) {
                            std::cerr << "src0=" << src0 << " src0->name=" << src0->name << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " backend=" << src0->backend << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
                        }
                        if (src1 != nullptr) {
                            std::cerr << "src1=" << src1 << " src1->name=" << src1->name << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " backend=" << src1->backend << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
                        }
                        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
                        std::cerr << std::endl << "Result:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, tensor_data, i0, i1, i2, i3);
                        std::cerr << std::endl << "Correct:" << std::endl;
                        ggml_vk_print_tensor_area(tensor, comp_result, i0, i1, i2, i3);
                        std::cerr << std::endl;
                        std::vector<const ggml_tensor *> done;
                        ggml_vk_print_graph_origin(tensor, done);
                        GGML_ASSERT(false);
                    }
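
                    // Remember the first element whose absolute error exceeds 0.1 so it can be
                    // reported later alongside the average error.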
                    if (first_error[0] == -1 && std::fabs(correct - result) > 0.1f) {
                        first_error[0] = i0;
                        first_error[1] = i1;
                        first_error[2] = i2;
                        first_error[3] = i3;
                        first_error_result = result;
                        first_error_correct = correct;
                    }
                    // Skip infinite values to avoid a NaN avg_err; NaN can also legitimately appear
                    // in the results, and if both values are NaN the error counts as 0.
                    if (!std::isinf(correct) && !std::isinf(result) && !std::isnan(correct) && !std::isnan(result)) {
                        avg_err += std::fabs(correct - result);
                    }
                    counter++;
                }
            }
        }
    }
    avg_err /= counter;
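
    // Optional verbose dump for the tensor selected via vk_output_tensor: prints the tensor
    // metadata, two areas of the result and the CPU reference, and the graph origin,
    // regardless of whether the error is within tolerance.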
    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        std::cerr << "TENSOR CHECK: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->backend: " << tensor->backend << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " backend=" << src0->backend << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " backend=" << src1->backend << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 0, 0);
        std::cerr << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 1, 0);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 1, 0);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
    }
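
    // Fail hard when the mean absolute error exceeds the 0.05 tolerance (or is NaN),
    // dumping the area around the first recorded error before asserting.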
    if (avg_err > 0.05 || std::isnan(avg_err)) {
        std::cerr << "ERROR: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->backend: " << tensor->backend << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " backend=" << src0->backend << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " backend=" << src1->backend << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
        GGML_ASSERT(false);
    } else {
        std::cerr << check_counter << " " << tensor->name << " op=" << ggml_op_name(tensor->op) << " backend=" << tensor->backend << " avg_err=" << avg_err << std::endl;
    }
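
    // Release the CPU reference buffer; the host copy of the tensor data is freed
    // only when the tensor lives on the GPU.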
    free(comp_result);
    comp_result = nullptr;
    comp_size = 0;

    if (tensor->backend == GGML_BACKEND_TYPE_GPU) {
        free(tensor_data);
    }
}
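
// CPU-assist wrapper: fetches the global Vulkan context and forwards to the
// post-computation results check for the given tensor.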
void ggml_vk_check_results_1_cpu_assist(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    ggml_backend_vk_context * ctx = &vk_instance.contexts[0];

    ggml_vk_check_results_1(ctx, params, tensor);
}
#endif