// ggml-vulkan.cpp
#include "ggml-vulkan.h"
#include <vulkan/vulkan_core.h>
#if defined(GGML_VULKAN_RUN_TESTS) || defined(GGML_VULKAN_PERF)
#include <chrono>
#endif

#include <vulkan/vulkan.hpp>

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <tuple>
#include <vector>
#include <sstream>
#include <utility>
#include <memory>
#include <limits>
#include <map>
#include <unordered_map>
#include <mutex>
#include <condition_variable> // needed for std::condition_variable used by the compile counter below
#include <future>
#include <thread>

#include "ggml-impl.h"
#include "ggml-backend-impl.h"

#include "ggml-vulkan-shaders.hpp"

#define VK_API_VERSION VK_API_VERSION_1_2

#define CEIL_DIV(M, N) (((M) + (N)-1) / (N))
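// Illustrative example (not part of the original source): CEIL_DIV rounds an
// integer division up, e.g. CEIL_DIV(10, 4) == 3. It is used below to turn
// element counts into dispatch sizes, so a grid of ne elements with a
// workgroup denominator of wg_denoms[0] gets CEIL_DIV(ne, wg_denoms[0])
// workgroups, enough to cover every element.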
#define VK_VENDOR_ID_AMD    0x1002
#define VK_VENDOR_ID_APPLE  0x106b
#define VK_VENDOR_ID_INTEL  0x8086
#define VK_VENDOR_ID_NVIDIA 0x10de

#define VK_DEVICE_DESCRIPTOR_POOL_SIZE 32

#define GGML_VK_MAX_NODES 8192

#define MAX_VK_BUFFERS 256

#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 1
#else
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif

#define VK_CHECK(err, msg)                                          \
    do {                                                            \
        vk::Result err_ = (err);                                    \
        if (err_ != vk::Result::eSuccess) {                         \
            fprintf(stderr, "ggml_vulkan: %s error %s at %s:%d\n",  \
                #err, to_string(err_).c_str(), __FILE__, __LINE__); \
            exit(1);                                                \
        }                                                           \
    } while (0)
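// Illustrative usage sketch (not part of the original source): VK_CHECK wraps
// a call returning vk::Result and aborts with a diagnostic on failure, e.g.
//
//     VK_CHECK(device.waitForFences({ fence }, VK_TRUE, UINT64_MAX), "wait fence");
//
// Note that msg is currently unused by the macro body; the stringized
// expression #err is printed instead.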
#ifdef GGML_VULKAN_DEBUG
#define VK_LOG_DEBUG(msg) std::cerr << msg << std::endl
#else
#define VK_LOG_DEBUG(msg) ((void) 0)
#endif // GGML_VULKAN_DEBUG

struct ggml_backend_vk_context;

struct vk_queue {
    uint32_t queue_family_index;
    vk::Queue queue;
    vk::CommandPool pool;
    uint32_t cmd_buffer_idx;
    std::vector<vk::CommandBuffer> cmd_buffers;
    vk::PipelineStageFlags stage_flags;
    bool transfer_only;
};

struct vk_pipeline_struct {
    std::string name;
    vk::ShaderModule shader_module;
    vk::DescriptorSetLayout dsl;
    std::vector<vk::DescriptorPool> descriptor_pools;
    std::vector<vk::DescriptorSet> descriptor_sets;
    uint32_t descriptor_set_idx;
    vk::PipelineLayout layout;
    vk::Pipeline pipeline;
    uint32_t push_constant_size;
    uint32_t parameter_count;
    std::array<uint32_t, 3> wg_denoms;
    uint32_t align;
};

typedef std::shared_ptr<vk_pipeline_struct> vk_pipeline;
typedef std::weak_ptr<vk_pipeline_struct> vk_pipeline_ref;

static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline);

struct vk_matmul_pipeline_struct {
    vk_pipeline l, m, s;
    vk_pipeline a_l, a_m, a_s;
};

typedef std::shared_ptr<vk_matmul_pipeline_struct> vk_matmul_pipeline;

struct vk_device_struct;
typedef std::shared_ptr<vk_device_struct> vk_device;
typedef std::weak_ptr<vk_device_struct> vk_device_ref;

struct vk_buffer_struct;
typedef std::shared_ptr<vk_buffer_struct> vk_buffer;
typedef std::weak_ptr<vk_buffer_struct> vk_buffer_ref;

struct ggml_backend_vk_buffer_type_context {
    std::string name;
    vk_device device;
};

static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft);
static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size);
static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft);
static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft);
static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor);

static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
    /* .get_name       = */ ggml_backend_vk_buffer_type_name,
    /* .alloc_buffer   = */ ggml_backend_vk_buffer_type_alloc_buffer,
    /* .get_alignment  = */ ggml_backend_vk_buffer_type_get_alignment,
    /* .get_max_size   = */ ggml_backend_vk_buffer_type_get_max_size,
    /* .get_alloc_size = */ ggml_backend_vk_buffer_type_get_alloc_size,
    /* .is_host        = */ NULL,
};

#ifdef GGML_VULKAN_MEMORY_DEBUG
class vk_memory_logger;
#endif
#ifdef GGML_VULKAN_PERF
class vk_perf_logger;
#endif
static void ggml_vk_destroy_buffer(vk_buffer& buf);

struct vk_device_struct {
    std::mutex mutex;

    vk::PhysicalDevice physical_device;
    vk::PhysicalDeviceProperties properties;
    std::string name;
    uint64_t max_memory_allocation_size;
    bool fp16;
    vk::Device device;
    uint32_t vendor_id;
    vk_queue compute_queue;
    vk_queue transfer_queue;
    bool single_queue;
    uint32_t subgroup_size;
    bool uma;
    size_t idx;

    vk_matmul_pipeline pipeline_matmul_f32;
    vk_matmul_pipeline pipeline_matmul_f32_f16;
    vk_matmul_pipeline pipeline_matmul_f16;
    vk_matmul_pipeline pipeline_matmul_f16_f32;
    vk_pipeline pipeline_matmul_split_k_reduce;

    vk_matmul_pipeline pipeline_dequant_mul_mat_mat[GGML_TYPE_COUNT];

    vk_matmul_pipeline pipeline_matmul_id_f32;
    vk_matmul_pipeline pipeline_matmul_id_f16;
    vk_matmul_pipeline pipeline_matmul_id_f16_f32;

    vk_matmul_pipeline pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT];

    vk_pipeline pipeline_dequant[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_COUNT];

    vk_pipeline pipeline_mul_mat_vec_p021_f16_f32;
    vk_pipeline pipeline_mul_mat_vec_nc_f16_f32;
    vk_pipeline pipeline_get_rows[GGML_TYPE_COUNT];
    vk_pipeline pipeline_get_rows_f32[GGML_TYPE_COUNT];
    vk_pipeline pipeline_acc_f32;
    vk_pipeline pipeline_add_f32, pipeline_add_f16_f32_f16;
    vk_pipeline pipeline_mul_f32;
    vk_pipeline pipeline_div_f32;
    vk_pipeline pipeline_concat_f32, pipeline_concat_f16, pipeline_concat_i32;
    vk_pipeline pipeline_upscale_f32;
    vk_pipeline pipeline_scale_f32;
    vk_pipeline pipeline_sqr_f32;
    vk_pipeline pipeline_sin_f32;
    vk_pipeline pipeline_cos_f32;
    vk_pipeline pipeline_clamp_f32;
    vk_pipeline pipeline_pad_f32;
    vk_pipeline pipeline_repeat_f32;
    vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16;
    vk_pipeline pipeline_norm_f32;
    vk_pipeline pipeline_group_norm_f32;
    vk_pipeline pipeline_rms_norm_f32;
    vk_pipeline pipeline_gelu_f32;
    vk_pipeline pipeline_gelu_quick_f32;
    vk_pipeline pipeline_silu_f32;
    vk_pipeline pipeline_relu_f32;
    vk_pipeline pipeline_leaky_relu_f32;
    vk_pipeline pipeline_tanh_f32;
    vk_pipeline pipeline_diag_mask_inf_f32;
    vk_pipeline pipeline_soft_max_f32, pipeline_soft_max_f32_f16;
    vk_pipeline pipeline_rope_norm_f32, pipeline_rope_norm_f16;
    vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;
    vk_pipeline pipeline_argsort_f32;
    vk_pipeline pipeline_sum_rows_f32;
    vk_pipeline pipeline_im2col_f32, pipeline_im2col_f32_f16;
    vk_pipeline pipeline_timestep_embedding_f32;

    std::unordered_map<std::string, vk_pipeline_ref> pipelines;
    std::unordered_map<std::string, uint64_t> pipeline_descriptor_set_requirements;

    std::vector<std::tuple<void*, size_t, vk_buffer>> pinned_memory;

    vk::Fence fence;
    vk_buffer sync_staging;

    ggml_backend_buffer_type buffer_type;

#ifdef GGML_VULKAN_MEMORY_DEBUG
    std::unique_ptr<vk_memory_logger> memory_logger;
#endif
#ifdef GGML_VULKAN_PERF
    std::unique_ptr<vk_perf_logger> perf_logger;
#endif

    ~vk_device_struct() {
        VK_LOG_DEBUG("destroy device " << name);

        device.destroyFence(fence);

        ggml_vk_destroy_buffer(sync_staging);

        device.destroyCommandPool(compute_queue.pool);
        if (!single_queue) {
            device.destroyCommandPool(transfer_queue.pool);
        }

        for (auto& pipeline : pipelines) {
            if (pipeline.second.expired()) {
                continue;
            }

            vk_pipeline pl = pipeline.second.lock();
            ggml_vk_destroy_pipeline(device, pl);
        }
        pipelines.clear();

        device.destroy();
    }
};

struct vk_buffer_struct {
    vk::Buffer buffer = VK_NULL_HANDLE;
    vk::DeviceMemory device_memory = VK_NULL_HANDLE;
    vk::MemoryPropertyFlags memory_property_flags;
    void * ptr;
    size_t size = 0;

    vk_device device;

    ~vk_buffer_struct() {
        if (size == 0) {
            return;
        }
        VK_LOG_DEBUG("~vk_buffer_struct(" << buffer << ", " << size << ")");

        device->device.freeMemory(device_memory);
        device->device.destroyBuffer(buffer);
    }
};

struct vk_subbuffer {
    vk_buffer buffer;
    uint64_t offset;
    uint64_t size;

    operator vk::DescriptorBufferInfo() const {
        return { buffer->buffer, offset, size };
    }
};
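// Illustrative example (not part of the original source): the conversion
// operator above lets a vk_subbuffer be passed wherever vulkan.hpp expects a
// vk::DescriptorBufferInfo, e.g. when filling descriptor writes:
//
//     vk_subbuffer sub = { buf, 0, buf->size };
//     vk::DescriptorBufferInfo info = sub; // { buf->buffer, 0, buf->size }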
struct vk_semaphore {
    vk::Semaphore s;
    uint64_t value;
};

struct vk_submission {
    vk::CommandBuffer buffer;
    std::vector<vk_semaphore> wait_semaphores;
    std::vector<vk_semaphore> signal_semaphores;
};

typedef std::vector<vk_submission> vk_sequence;

struct vk_mat_mat_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t k_split;
    uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
};

struct vk_mat_vec_push_constants {
    uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3;
};

struct vk_mat_mat_id_push_constants {
    uint32_t M; uint32_t N; uint32_t K;
    uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t nei0; uint32_t nei1; uint32_t nbi1; uint32_t ne11;
};

struct vk_mat_vec_id_push_constants {
    uint32_t ncols; uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
    uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d;
    uint32_t nei0; uint32_t ne11;
};

struct vk_op_push_constants {
    uint32_t KX;
    uint32_t KY;
    float param1;
    float param2;
};

struct vk_op_unary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t d_offset;
    float param1; float param2;
};

struct vk_op_binary_push_constants {
    uint32_t ne;
    uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13;
    uint32_t ne20; uint32_t ne21; uint32_t ne22; uint32_t ne23; uint32_t nb20; uint32_t nb21; uint32_t nb22; uint32_t nb23;
    uint32_t d_offset;
    float param1; float param2; int32_t param3;
};

struct vk_op_diag_mask_push_constants {
    uint32_t ncols;
    uint32_t rows_per_channel;
    int32_t n_past;
};

struct vk_op_rope_push_constants {
    uint32_t ncols;
    uint32_t n_dims;
    float freq_scale;
    uint32_t p_delta_rows;
    float freq_base;
    float ext_factor;
    float attn_factor;
    float corr_dims[2];
    float theta_scale;
    uint32_t has_ff;
};

struct vk_op_soft_max_push_constants {
    uint32_t KX;
    uint32_t KY;
    float scale;
    float max_bias;
    float m0;
    float m1;
    uint32_t n_head_log2;
};

struct vk_op_argsort_push_constants {
    uint32_t ncols;
    uint32_t ncols_pad;
    int32_t order;
};

struct vk_op_im2col_push_constants {
    uint32_t batch_offset; uint32_t offset_delta;
    uint32_t IC;
    uint32_t IW; uint32_t IH;
    uint32_t OW; uint32_t OH;
    uint32_t KW; uint32_t KH;
    uint32_t pelements;
    uint32_t CHW;
    int32_t s0; int32_t s1;
    int32_t p0; int32_t p1;
    int32_t d0; int32_t d1;
};

struct vk_op_timestep_embedding_push_constants {
    uint32_t nb1;
    uint32_t dim;
    uint32_t max_period;
};

// Allow pre-recording command buffers
struct vk_staging_memcpy {
    vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {}

    void * dst;
    const void * src;
    size_t n;
};
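// Illustrative sketch (not part of the original source): because command
// buffers are recorded before the host data is necessarily in place, host
// copies are queued as vk_staging_memcpy entries and replayed around
// submission, along the lines of (subctx is a hypothetical vk_context):
//
//     for (auto& cpy : subctx->in_memcpys) {
//         memcpy(cpy.dst, cpy.src, cpy.n);
//     }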
struct vk_op_upscale_push_constants {
    uint32_t ne; uint32_t d_offset;
    uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
    uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13;
    float sf0; float sf1; float sf2; float sf3;
};

struct vk_context_struct {
    vk_submission * s;
    std::vector<vk_sequence> seqs;

    int exit_tensor_idx;

    std::vector<vk_staging_memcpy> in_memcpys;
    std::vector<vk_staging_memcpy> out_memcpys;

    vk_queue * q;
};
typedef std::shared_ptr<vk_context_struct> vk_context;
typedef std::weak_ptr<vk_context_struct> vk_context_ref;

struct ggml_vk_garbage_collector {
    std::vector<vk_semaphore> tl_semaphores;
    std::vector<vk_semaphore> semaphores;
    std::vector<vk::Event> events;
    std::vector<vk_buffer> temp_buffers;
    std::vector<vk_context> contexts;
};

#if defined(GGML_VULKAN_MEMORY_DEBUG) || defined(GGML_VULKAN_DEBUG)
#define VK_LOG_MEMORY(msg) std::cerr << "ggml_vulkan memory: " << msg << std::endl

static std::string format_size(size_t size) {
    const size_t kib = 1024;
    const size_t mib = kib * 1024;
    const size_t gib = mib * 1024;

    std::ostringstream oss;
    oss << std::fixed << std::setprecision(2);

    if (size >= gib) {
        oss << static_cast<double>(size) / gib << " GiB";
    } else if (size >= mib) {
        oss << static_cast<double>(size) / mib << " MiB";
    } else if (size >= kib) {
        oss << static_cast<double>(size) / kib << " KiB";
    } else {
        oss << size << " B";
    }

    return oss.str();
}
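// Illustrative example (not part of the original source): format_size picks
// the largest binary unit that fits, so format_size(3u * 1024 * 1024) yields
// "3.00 MiB" and format_size(512) yields "512 B".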
static std::mutex log_mutex;

class vk_memory_logger {
public:
    vk_memory_logger(): total_device(0), total_host(0) {}
    void log_allocation(vk_buffer_ref buf_ref, size_t size);
    void log_deallocation(vk_buffer_ref buf_ref);

private:
    std::map<vk::Buffer, size_t> allocations; // Track allocations
    size_t total_device;
    size_t total_host;
};
#else
#define VK_LOG_MEMORY(msg) ((void) 0)
#endif // GGML_VULKAN_MEMORY_DEBUG

#if defined(GGML_VULKAN_PERF)
class vk_perf_logger {
public:
    void print_timings() {
        std::cerr << "----------------\nVulkan Timings:" << std::endl;
        for (const auto& t : timings) {
            uint64_t total = 0;
            for (const auto& time : t.second) {
                total += time;
            }
            std::cerr << t.first << ": " << t.second.size() << " x " << (total / t.second.size() / 1000.0) << " ms" << std::endl;
        }

        timings.clear();
    }

    void log_timing(const ggml_tensor * node, uint64_t time) {
        if (node->op == GGML_OP_UNARY) {
            timings[ggml_unary_op_name(ggml_get_unary_op(node))].push_back(time);
            return;
        }
        if (node->op == GGML_OP_MUL_MAT || node->op == GGML_OP_MUL_MAT_ID) {
            const uint64_t m = node->src[0]->ne[1];
            const uint64_t n = node->src[1]->ne[1];
            const uint64_t k = node->src[1]->ne[0];
            std::string name = ggml_op_name(node->op);
            if (n == 1) {
                name += "_VEC m=" + std::to_string(m) + " k=" + std::to_string(k);
            } else {
                name += " m=" + std::to_string(m) + " n=" + std::to_string(n) + " k=" + std::to_string(k);
            }
            timings[name].push_back(time);
            return;
        }
        timings[ggml_op_name(node->op)].push_back(time);
    }
private:
    std::map<std::string, std::vector<uint64_t>> timings;
};
#endif // GGML_VULKAN_PERF

struct ggml_backend_vk_context {
    std::string name;

    vk_device device;

    size_t semaphore_idx, event_idx;
    ggml_vk_garbage_collector gc;
    size_t prealloc_size_x, prealloc_size_y, prealloc_size_split_k;
    vk_buffer prealloc_x, prealloc_y, prealloc_split_k;
    vk::Fence fence;

    vk_buffer buffer_pool[MAX_VK_BUFFERS];

    vk_context_ref compute_ctx;
    vk_context_ref transfer_ctx;

    std::vector<vk_context_ref> tensor_ctxs;
};

static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT

static uint64_t vk_tensor_offset(const ggml_tensor * tensor) {
    if (tensor->view_src) {
        return (uint8_t *) tensor->view_src->data - (uint8_t *) vk_ptr_base;
    }
    return (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
}
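// Illustrative example (not part of the original source): tensors in a Vulkan
// backend buffer do not hold real host pointers; tensor->data stores
// vk_ptr_base plus the byte offset into the device buffer, so a tensor placed
// 4096 bytes into its buffer has tensor->data == (char *) vk_ptr_base + 4096
// and vk_tensor_offset() recovers 4096.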
struct ggml_backend_vk_buffer_context {
    vk_device_ref device;
    vk_buffer dev_buffer;
    std::string name;

    ggml_backend_vk_buffer_context(vk_device_ref device, vk_buffer&& dev_buffer, std::string& name) :
        device(device),
        dev_buffer(dev_buffer),
        name(name) {
    }

    ~ggml_backend_vk_buffer_context() {
        ggml_vk_destroy_buffer(dev_buffer);
    }
};

#ifdef GGML_VULKAN_MEMORY_DEBUG
void vk_memory_logger::log_allocation(vk_buffer_ref buf_ref, size_t size) {
    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    const std::string type = device ? "device" : "host";
    allocations[buf->buffer] = size;
    total_device += device ? size : 0;
    total_host += device ? 0 : size;
    VK_LOG_MEMORY(buf->device->name << ": +" << format_size(size) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
}

void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) {
    if (buf_ref.expired() || buf_ref.lock()->size == 0) {
        return;
    }

    std::lock_guard<std::mutex> guard(log_mutex);
    vk_buffer buf = buf_ref.lock();
    const bool device = bool(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eDeviceLocal);
    std::string type = device ? "device" : "host";
    auto it = allocations.find(buf->buffer);
    if (it != allocations.end()) {
        // Only adjust the running totals for a known allocation; the iterator
        // must not be dereferenced before the end() check.
        total_device -= device ? it->second : 0;
        total_host -= device ? 0 : it->second;
        VK_LOG_MEMORY(buf->device->name << ": -" << format_size(it->second) << " " << type << " at " << buf->buffer << ". Total device: " << format_size(total_device) << ", total host: " << format_size(total_host));
        allocations.erase(it);
    } else {
        VK_LOG_MEMORY("ERROR " << buf->device->name << ": Attempted to deallocate unknown " << type << " memory at " << buf->buffer);
    }
}
#endif // GGML_VULKAN_MEMORY_DEBUG
  501. struct vk_instance_t {
  502. vk::Instance instance;
  503. std::vector<size_t> device_indices;
  504. vk_device devices[GGML_VK_MAX_DEVICES];
  505. };
  506. static bool vk_instance_initialized = false;
  507. static vk_instance_t vk_instance;
  508. #ifdef GGML_VULKAN_CHECK_RESULTS
  509. static size_t vk_skip_checks;
  510. static size_t vk_output_tensor;
  511. static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name);
  512. static void ggml_vk_check_results_0(ggml_tensor * tensor);
  513. static void ggml_vk_check_results_1(ggml_tensor * tensor);
  514. #endif
  515. typedef void (*ggml_vk_func_t)(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
  516. static void ggml_backend_vk_free(ggml_backend_t backend);
  517. // variables to track number of compiles in progress
  518. static uint32_t compile_count = 0;
  519. static std::mutex compile_count_mutex;
  520. static std::condition_variable compile_count_cond;
  521. static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipeline, const std::string name, size_t spv_size, const void* spv_data, const std::string entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, std::vector<uint32_t> specialization_constants, uint32_t align) {
  522. VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << name << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size << ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align << ")");
  523. GGML_ASSERT(parameter_count > 0);
  524. GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT
  525. pipeline = std::make_shared<vk_pipeline_struct>();
  526. pipeline->name = name;
  527. pipeline->parameter_count = parameter_count;
  528. pipeline->push_constant_size = push_constant_size;
  529. pipeline->wg_denoms = wg_denoms;
  530. pipeline->align = align;
  531. vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast<const uint32_t *>(spv_data));
  532. pipeline->shader_module = device->device.createShaderModule(shader_module_create_info);
  533. std::vector<vk::DescriptorSetLayoutBinding> dsl_binding;
  534. std::vector<vk::DescriptorBindingFlags> dsl_binding_flags;
  535. for (uint32_t i = 0; i < parameter_count; i++) {
  536. dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute});
  537. dsl_binding_flags.push_back({});
  538. }
  539. vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags };
  540. vk::PushConstantRange pcr(
  541. vk::ShaderStageFlagBits::eCompute,
  542. 0,
  543. pipeline->push_constant_size
  544. );
  545. vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info(
  546. {},
  547. dsl_binding);
  548. descriptor_set_layout_create_info.setPNext(&dslbfci);
  549. pipeline->dsl = device->device.createDescriptorSetLayout(descriptor_set_layout_create_info);
  550. vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count * VK_DEVICE_DESCRIPTOR_POOL_SIZE);
  551. vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size);
  552. pipeline->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info));
  553. pipeline->descriptor_set_idx = 0;
  554. vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), pipeline->dsl, pcr);
  555. pipeline->layout = device->device.createPipelineLayout(pipeline_layout_create_info);
  556. std::vector<vk::SpecializationMapEntry> specialization_entries(specialization_constants.size());
  557. for (size_t i = 0; i < specialization_constants.size(); i++) {
  558. specialization_entries[i].constantID = i;
  559. specialization_entries[i].offset = i * sizeof(uint32_t);
  560. specialization_entries[i].size = sizeof(uint32_t);
  561. }
  562. vk::SpecializationInfo specialization_info(
  563. specialization_entries.size(),
  564. specialization_entries.data(),
  565. specialization_constants.size() * sizeof(uint32_t),
  566. specialization_constants.data()
  567. );
  568. vk::PipelineShaderStageCreateInfo pipeline_shader_create_info(
  569. vk::PipelineShaderStageCreateFlags(),
  570. vk::ShaderStageFlagBits::eCompute,
  571. pipeline->shader_module,
  572. entrypoint.c_str(),
  573. &specialization_info);
  574. vk::ComputePipelineCreateInfo compute_pipeline_create_info(
  575. vk::PipelineCreateFlags(),
  576. pipeline_shader_create_info,
  577. pipeline->layout);
  578. pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;
  579. {
  580. std::lock_guard<std::mutex> guard(device->mutex);
  581. device->pipelines.insert({ pipeline->name, pipeline });
  582. }
  583. {
  584. std::lock_guard<std::mutex> guard(compile_count_mutex);
  585. assert(compile_count > 0);
  586. compile_count--;
  587. }
  588. compile_count_cond.notify_all();
  589. }
  590. static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) {
  591. VK_LOG_DEBUG("ggml_pipeline_destroy_pipeline(" << pipeline->name << ")");
  592. for (auto& pool : pipeline->descriptor_pools) {
  593. device.destroyDescriptorPool(pool);
  594. }
  595. pipeline->descriptor_pools.clear();
  596. pipeline->descriptor_sets.clear();
  597. pipeline->descriptor_set_idx = 0;
  598. device.destroyDescriptorSetLayout(pipeline->dsl);
  599. device.destroyPipelineLayout(pipeline->layout);
  600. device.destroyShaderModule(pipeline->shader_module);
  601. device.destroyPipeline(pipeline->pipeline);
  602. }
static void ggml_pipeline_request_descriptor_sets(vk_device& device, vk_pipeline& pipeline, uint32_t n) {
    VK_LOG_DEBUG("ggml_pipeline_request_descriptor_sets(" << pipeline->name << ", " << n << ")");
    device->pipeline_descriptor_set_requirements[pipeline->name] += n;
}

static void ggml_pipeline_allocate_descriptor_sets(vk_device& device) {
    std::lock_guard<std::mutex> guard(device->mutex);

    for (auto& pair : device->pipeline_descriptor_set_requirements) {
        vk_pipeline pipeline = device->pipelines.at(pair.first).lock();
        const uint64_t n = pair.second;

        VK_LOG_DEBUG("ggml_pipeline_allocate_descriptor_sets(" << pipeline->name << ", " << n << ")");

        if (pipeline->descriptor_sets.size() >= pipeline->descriptor_set_idx + n) {
            // Enough descriptors are available
            continue;
        }

        uint32_t to_alloc = pipeline->descriptor_set_idx + n - pipeline->descriptor_sets.size();
        uint32_t pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE - pipeline->descriptor_sets.size() % VK_DEVICE_DESCRIPTOR_POOL_SIZE;
        uint32_t pool_idx = pipeline->descriptor_sets.size() / VK_DEVICE_DESCRIPTOR_POOL_SIZE;

        while (to_alloc > 0) {
            const uint32_t alloc_count = std::min(pool_remaining, to_alloc);
            to_alloc -= alloc_count;
            pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE;

            if (pool_idx >= pipeline->descriptor_pools.size()) {
                vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count * VK_DEVICE_DESCRIPTOR_POOL_SIZE);
                vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size);
                pipeline->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info));
            }

            std::vector<vk::DescriptorSetLayout> layouts(alloc_count);
            for (uint32_t i = 0; i < alloc_count; i++) {
                layouts[i] = pipeline->dsl;
            }
            vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline->descriptor_pools[pool_idx], alloc_count, layouts.data());
            std::vector<vk::DescriptorSet> sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info);
            pipeline->descriptor_sets.insert(pipeline->descriptor_sets.end(), sets.begin(), sets.end());

            pool_idx++;
        }
    }
}

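// Worked example of the chunking above (illustrative numbers, assuming
// VK_DEVICE_DESCRIPTOR_POOL_SIZE == 32): with 40 sets already allocated and
// n == 50 more requested from descriptor_set_idx == 40, to_alloc == 50,
// pool_remaining == 32 - 40 % 32 == 24 and pool_idx == 1. The loop then
// allocates 24 sets from pool 1, creates pool 2, and allocates the remaining
// 26 sets from it.
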
static void ggml_pipeline_cleanup(vk_pipeline& pipeline) {
    VK_LOG_DEBUG("ggml_pipeline_cleanup(" << pipeline->name << ")");
    pipeline->descriptor_set_idx = 0;
}

static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_queue& q) {
    VK_LOG_DEBUG("ggml_vk_create_cmd_buffer()");
    std::lock_guard<std::mutex> guard(device->mutex);

    if (q.cmd_buffers.size() > q.cmd_buffer_idx) {
        // Reuse command buffer
        return q.cmd_buffers[q.cmd_buffer_idx++];
    }

    vk::CommandBufferAllocateInfo command_buffer_alloc_info(
        q.pool,
        vk::CommandBufferLevel::ePrimary,
        1);
    const std::vector<vk::CommandBuffer> cmd_buffers = device->device.allocateCommandBuffers(command_buffer_alloc_info);
    auto buf = cmd_buffers.front();

    q.cmd_buffers.push_back(buf);
    q.cmd_buffer_idx++;

    return buf;
}

static vk_submission ggml_vk_create_submission(vk_device& device, vk_queue& q, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
    VK_LOG_DEBUG("ggml_vk_create_submission()");
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(device, q);
    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
    return s;
}

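// Usage sketch (illustrative): a submission ties a fresh command buffer to the
// semaphores it should wait on and signal, e.g.
//
//     vk_submission s = ggml_vk_create_submission(device, q, { wait_sem }, { signal_sem });
//
// where wait_sem and signal_sem are vk_semaphore values obtained elsewhere,
// e.g. from ggml_vk_create_timeline_semaphore below.
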
static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) {
    if (ctx->seqs.empty()) {
        if (fence) {
            ctx->q->queue.submit({}, fence);
        }
        return;
    }
    VK_LOG_DEBUG("ggml_vk_submit(" << ctx << ", " << fence << ")");

    std::vector<std::vector<uint64_t>> tl_wait_vals;
    std::vector<std::vector<uint64_t>> tl_signal_vals;
    std::vector<std::vector<vk::Semaphore>> tl_wait_semaphores;
    std::vector<std::vector<vk::Semaphore>> tl_signal_semaphores;
    std::vector<vk::TimelineSemaphoreSubmitInfo> tl_submit_infos;
    std::vector<vk::SubmitInfo> submit_infos;
    int idx = -1;
    std::vector<std::vector<vk::PipelineStageFlags>> stage_flags;

    size_t reserve = 0;

    for (const auto& sequence : ctx->seqs) {
        reserve += sequence.size();
    }

    // Pre-reserve vectors to prevent reallocation, which invalidates pointers
    tl_wait_semaphores.reserve(reserve);
    tl_wait_vals.reserve(reserve);
    tl_signal_semaphores.reserve(reserve);
    tl_signal_vals.reserve(reserve);
    tl_submit_infos.reserve(reserve);
    submit_infos.reserve(reserve);
    stage_flags.reserve(reserve);

    for (const auto& sequence : ctx->seqs) {
        for (const auto& submission : sequence) {
            stage_flags.push_back({});
            idx++;
            tl_wait_vals.push_back({});
            tl_wait_semaphores.push_back({});
            tl_signal_vals.push_back({});
            tl_signal_semaphores.push_back({});
            for (size_t i = 0; i < submission.wait_semaphores.size(); i++) {
                stage_flags[idx].push_back(ctx->q->stage_flags);
                tl_wait_vals[idx].push_back(submission.wait_semaphores[i].value);
                tl_wait_semaphores[idx].push_back(submission.wait_semaphores[i].s);
            }
            for (size_t i = 0; i < submission.signal_semaphores.size(); i++) {
                tl_signal_vals[idx].push_back(submission.signal_semaphores[i].value);
                tl_signal_semaphores[idx].push_back(submission.signal_semaphores[i].s);
            }
            tl_submit_infos.push_back({
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_vals[idx].data(),
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_vals[idx].data(),
            });
            tl_submit_infos[idx].sType = vk::StructureType::eTimelineSemaphoreSubmitInfo;
            tl_submit_infos[idx].pNext = nullptr;
            vk::SubmitInfo si{
                (uint32_t) submission.wait_semaphores.size(),
                tl_wait_semaphores[idx].data(),
                stage_flags[idx].data(),
                1,
                &submission.buffer,
                (uint32_t) submission.signal_semaphores.size(),
                tl_signal_semaphores[idx].data(),
            };
            si.setPNext(&tl_submit_infos[idx]);
            submit_infos.push_back(si);
        }
    }

    ctx->q->queue.submit(submit_infos, fence);

    ctx->seqs.clear();
}

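// The pNext chains built above are how Vulkan attaches timeline-semaphore
// wait/signal values to a batch: each vk::SubmitInfo points at its matching
// vk::TimelineSemaphoreSubmitInfo, which in turn points into the tl_* value
// arrays. That is why everything is reserved up front; a reallocation would
// leave the submit infos holding dangling pointers before queue.submit()
// consumes them.
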
static uint32_t ggml_vk_find_queue_family_index(std::vector<vk::QueueFamilyProperties>& queue_family_props, const vk::QueueFlags& required, const vk::QueueFlags& avoid, int32_t compute_index, uint32_t min_num_queues) {
    VK_LOG_DEBUG("ggml_vk_find_queue_family_index()");
    const uint32_t qfsize = queue_family_props.size();

    // Try with avoid preferences first
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required && !(queue_family_props[i].queueFlags & avoid)) {
            return i;
        }
    }

    // Fall back to only required
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && (compute_index < 0 || i != (uint32_t) compute_index) && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to reusing the compute queue family
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueCount >= min_num_queues && queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // Fall back to ignoring min_num_queues
    for (uint32_t i = 0; i < qfsize; i++) {
        if (queue_family_props[i].queueFlags & required) {
            return i;
        }
    }

    // All commands that are allowed on a queue that supports transfer operations are also allowed on a queue that supports either graphics or compute operations.
    // Thus, if the capabilities of a queue family include VK_QUEUE_GRAPHICS_BIT or VK_QUEUE_COMPUTE_BIT, then reporting the VK_QUEUE_TRANSFER_BIT capability separately for that queue family is optional.
    if (compute_index >= 0) {
        return compute_index;
    }

    std::cerr << "ggml_vulkan: No suitable queue family index found." << std::endl;

    for (auto& q_family : queue_family_props) {
        std::cerr << "Queue number: " + std::to_string(q_family.queueCount) << " flags: " + to_string(q_family.queueFlags) << std::endl;
    }
    abort();
}

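// Illustrative walk through the fallback ladder: on a device with
// family 0 = graphics|compute|transfer, family 1 = compute|transfer and
// family 2 = transfer, a request for required = transfer with
// avoid = graphics|compute picks family 2 in the first loop. If family 2
// exposed fewer than min_num_queues queues, the later loops progressively
// drop the avoid preference, the compute_index exclusion and finally the
// queue-count requirement, before falling back to compute_index itself.
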
static void ggml_vk_create_queue(vk_device& device, vk_queue& q, uint32_t queue_family_index, uint32_t queue_index, vk::PipelineStageFlags&& stage_flags, bool transfer_only) {
    VK_LOG_DEBUG("ggml_vk_create_queue()");
    std::lock_guard<std::mutex> guard(device->mutex);

    q.queue_family_index = queue_family_index;
    q.transfer_only = transfer_only;

    vk::CommandPoolCreateInfo command_pool_create_info_compute(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), queue_family_index);
    q.pool = device->device.createCommandPool(command_pool_create_info_compute);

    q.cmd_buffer_idx = 0;

    q.queue = device->device.getQueue(queue_family_index, queue_index);

    q.stage_flags = stage_flags;
}

static vk_context ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_queue& q) {
    vk_context result = std::make_shared<vk_context_struct>();
    VK_LOG_DEBUG("ggml_vk_create_context(" << result << ")");
    ctx->gc.contexts.emplace_back(result);
    result->q = &q;
    return result;
}

static vk_context ggml_vk_create_temporary_context(vk_queue& q) {
    vk_context result = std::make_shared<vk_context_struct>();
    VK_LOG_DEBUG("ggml_vk_create_temporary_context(" << result << ")");
    result->q = &q;
    return result;
}

static vk_semaphore * ggml_vk_create_binary_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_binary_semaphore()");
    vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eBinary, 0 };
    vk::SemaphoreCreateInfo ci{};
    ci.setPNext(&tci);
    vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
    ctx->gc.semaphores.push_back({ semaphore, 0 });
    return &ctx->gc.semaphores[ctx->gc.semaphores.size() - 1];
}

static vk_semaphore * ggml_vk_create_timeline_semaphore(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_create_timeline_semaphore()");
    if (ctx->semaphore_idx >= ctx->gc.tl_semaphores.size()) {
        vk::SemaphoreTypeCreateInfo tci{ vk::SemaphoreType::eTimeline, 0 };
        vk::SemaphoreCreateInfo ci{};
        ci.setPNext(&tci);
        vk::Semaphore semaphore = ctx->device->device.createSemaphore(ci);
        ctx->gc.tl_semaphores.push_back({ semaphore, 0 });
    }
    return &ctx->gc.tl_semaphores[ctx->semaphore_idx++];
}

static vk::Event ggml_vk_create_event(ggml_backend_vk_context * ctx) {
    if (ctx->event_idx >= ctx->gc.events.size()) {
        ctx->gc.events.push_back(ctx->device->device.createEvent({}));
    }
    return ctx->gc.events[ctx->event_idx++];
}

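// Note on the gc caches above: semaphore_idx and event_idx index into
// per-context pools, so timeline semaphores and events are reused rather than
// recreated on every use (the index resets live outside this excerpt). The
// returned vk_semaphore pointers point into std::vectors and therefore remain
// valid only until the next push_back triggers a reallocation.
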
static void ggml_vk_queue_cleanup(vk_device& device, vk_queue& q) {
    VK_LOG_DEBUG("ggml_vk_queue_cleanup()");
    std::lock_guard<std::mutex> guard(device->mutex);

    // Requires command buffers to be done
    device->device.resetCommandPool(q.pool);
    q.cmd_buffer_idx = 0;
}

static uint32_t find_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, vk::MemoryPropertyFlags flags) {
    for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) {
        vk::MemoryType memory_type = mem_props->memoryTypes[i];
        if ((mem_req->memoryTypeBits & ((uint64_t)1 << i)) &&
            (flags & memory_type.propertyFlags) == flags &&
            mem_props->memoryHeaps[memory_type.heapIndex].size >= mem_req->size) {
            return i;
        }
    }
    return UINT32_MAX;
}

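// Worked example (illustrative): if mem_req->memoryTypeBits == 0b0110, only
// memory types 1 and 2 are acceptable for this resource. Requesting
// flags == eDeviceLocal | eHostVisible then returns the first of those types
// whose propertyFlags contain both bits and whose backing heap is large
// enough; UINT32_MAX signals that no type qualifies.
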
static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    VK_LOG_DEBUG("ggml_vk_create_buffer(" << device->name << ", " << size << ", " << to_string(req_flags) << ", " << to_string(fallback_flags) << ")");
    if (size > device->max_memory_allocation_size) {
        throw vk::OutOfDeviceMemoryError("Requested buffer size exceeds device memory allocation limit");
    }

    std::lock_guard<std::mutex> guard(device->mutex);

    vk_buffer buf = std::make_shared<vk_buffer_struct>();

    if (size == 0) {
        buf->size = 0;
        return buf;
    }

    buf->size = size;
    vk::BufferCreateInfo buffer_create_info{
        vk::BufferCreateFlags(),
        size,
        vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
    };

    buf->buffer = device->device.createBuffer(buffer_create_info);

    vk::MemoryRequirements mem_req = device->device.getBufferMemoryRequirements(buf->buffer);

    vk::PhysicalDeviceMemoryProperties mem_props = device->physical_device.getMemoryProperties();

    uint32_t memory_type_index = find_properties(&mem_props, &mem_req, req_flags);
    buf->memory_property_flags = req_flags;

    if (memory_type_index == UINT32_MAX && fallback_flags) {
        memory_type_index = find_properties(&mem_props, &mem_req, fallback_flags);
        buf->memory_property_flags = fallback_flags;
    }

    if (memory_type_index == UINT32_MAX) {
        device->device.destroyBuffer(buf->buffer);
        buf->size = 0;
        throw vk::OutOfDeviceMemoryError("No suitable memory type found");
    }

    try {
        buf->device_memory = device->device.allocateMemory({ mem_req.size, memory_type_index });
    } catch (const vk::SystemError&) {
        if (buf->memory_property_flags != fallback_flags) {
            // Try again with fallback flags
            memory_type_index = find_properties(&mem_props, &mem_req, fallback_flags);
            buf->memory_property_flags = fallback_flags;
            try {
                buf->device_memory = device->device.allocateMemory({ mem_req.size, memory_type_index });
            } catch (const vk::SystemError&) {
                device->device.destroyBuffer(buf->buffer);
                buf->size = 0;
                throw;
            }
        } else {
            // Out of Host/Device memory, clean up buffer
            device->device.destroyBuffer(buf->buffer);
            buf->size = 0;
            throw;
        }
    }
    buf->ptr = nullptr;

    if (buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        buf->ptr = device->device.mapMemory(buf->device_memory, 0, VK_WHOLE_SIZE);
    }

    device->device.bindBufferMemory(buf->buffer, buf->device_memory, 0);

    buf->device = device;

#ifdef GGML_VULKAN_MEMORY_DEBUG
    device->memory_logger->log_allocation(buf, size);
#endif

    return buf;
}

static vk_buffer ggml_vk_create_buffer_check(vk_device& device, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    try {
        return ggml_vk_create_buffer(device, size, req_flags, fallback_flags);
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw;
    }
}

static vk_buffer ggml_vk_create_buffer_device(vk_device& device, size_t size) {
    vk_buffer buf;
    try {
        if (device->uma) {
            // Fall back to host memory type
            buf = ggml_vk_create_buffer(device, size, vk::MemoryPropertyFlagBits::eDeviceLocal, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
        } else {
            // Use ReBAR if available, otherwise fall back to device-only visible memory
            buf = ggml_vk_create_buffer(device, size, vk::MemoryPropertyFlagBits::eDeviceLocal | vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Device memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw;
    }

    return buf;
}

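// Usage sketch (illustrative): allocations elsewhere in the backend go through
// these wrappers, e.g.
//
//     vk_buffer staging = ggml_vk_create_buffer_check(device, 1u << 20,
//         vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
//     vk_buffer weights = ggml_vk_create_buffer_device(device, 1u << 20);
//
// create_buffer_check logs and rethrows on failure, while create_buffer_device
// picks the memory-type preference (UMA vs. ReBAR vs. device-only) shown above.
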
static void ggml_vk_destroy_buffer(vk_buffer& buf) {
    if (buf == nullptr) {
        return;
    }

#ifdef GGML_VULKAN_MEMORY_DEBUG
    if (buf->device != nullptr) {
        buf->device->memory_logger->log_deallocation(buf);
    }
#endif

    buf.reset();
}

static vk_subbuffer ggml_vk_subbuffer(vk_buffer& buf) {
    return { buf, 0, VK_WHOLE_SIZE };
}

static void ggml_vk_sync_buffers(vk_context& ctx) {
    VK_LOG_DEBUG("ggml_vk_sync_buffers()");
    const bool transfer_queue = ctx->q->transfer_only;

    // Shader access flags are not valid on transfer-only queues
    const vk::AccessFlags access = !transfer_queue
        ? (vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite)
        : (vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite);

    ctx->s->buffer.pipelineBarrier(
        ctx->q->stage_flags,
        ctx->q->stage_flags,
        {},
        { { access, access } },
        {},
        {}
    );
}

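// This is a conservative full barrier: with identical source and destination
// stage masks and read|write access masks on both sides, every prior
// shader/transfer write on this queue is made visible to every subsequent
// shader/transfer access. ggml_vk_wait_events below offers the finer-grained
// event-based alternative.
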
static void ggml_vk_wait_events(vk_context& ctx, std::vector<vk::Event>&& events) {
    VK_LOG_DEBUG("ggml_vk_wait_events()");
    if (events.empty()) {
        return;
    }

    ctx->s->buffer.waitEvents(
        events,
        ctx->q->stage_flags,
        ctx->q->stage_flags,
        {},
        {},
        {}
    );
}

static void ggml_vk_load_shaders(vk_device& device) {
    VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")");

    // mulmat
    std::initializer_list<uint32_t> warptile_l = { 128, 128, 128, 16, device->subgroup_size * 2, 64, 2, 4, 4, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_m = { 128,  64,  64, 16, device->subgroup_size, 32, 2, 4, 2, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_s = { std::max(device->subgroup_size, 16u), 32, 32, 16, 32, 32, 2, 2, 2, device->subgroup_size };

    std::initializer_list<uint32_t> warptile_mmq_l = { 128, 128, 128, 32, device->subgroup_size * 2, 64, 2, 4, 4, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_mmq_m = { 128,  64,  64, 32, device->subgroup_size, 32, 2, 4, 2, device->subgroup_size };
    std::initializer_list<uint32_t> warptile_mmq_s = { std::max(device->subgroup_size, 16u), 32, 32, 32, 32, 32, 2, 2, 2, device->subgroup_size };

    std::array<uint32_t, 3> l_wg_denoms = { 128, 128, 1 };
    std::array<uint32_t, 3> m_wg_denoms = {  64,  64, 1 };
    std::array<uint32_t, 3> s_wg_denoms = {  32,  32, 1 };

    uint32_t l_align = 128;
    uint32_t m_align =  64;
    uint32_t s_align =  32;

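    // The warptile lists and the wg_denoms/align values above are tuning
    // parameters: ggml_vk_create_pipeline (defined below) forwards the
    // warptiles to the shaders as specialization constants and uses wg_denoms
    // and align to size and align workgroups. The l/m/s suffixes select large,
    // medium and small tile variants, and the *_mmq_* lists cover the
    // quantized mul_mat paths; the exact meaning of each field is fixed by the
    // corresponding matmul shader.
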
    device->pipeline_matmul_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_f32_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_f16_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_id_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_id_f16_f32 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_matmul_id_f16 = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K] = std::make_shared<vk_matmul_pipeline_struct>();
    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL] = std::make_shared<vk_matmul_pipeline_struct>();

    std::vector<std::future<void>> compiles;
    auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, std::vector<uint32_t>&& specialization_constants, uint32_t align) {
        {
            // wait until fewer than N compiles are in progress
            uint32_t N = std::max(1u, std::thread::hardware_concurrency());
            std::unique_lock<std::mutex> guard(compile_count_mutex);
            while (compile_count >= N) {
                compile_count_cond.wait(guard);
            }
            compile_count++;
        }
        compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), name, spv_size, spv_data, entrypoint, parameter_count, push_constant_size, wg_denoms, specialization_constants, align));
    };

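    // Each call to this lambda consumes one slot of the bounded compile
    // budget: the wait loop above blocks while hardware_concurrency() compiles
    // are in flight, and each std::async task releases its slot through the
    // compile_count decrement at the end of ggml_vk_create_pipeline_func. The
    // futures collected in `compiles` can then be joined once every pipeline
    // below has been requested.
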
    if (device->fp16) {
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->l, "matmul_f32_l", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->m, "matmul_f32_m", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->s, "matmul_f32_s", matmul_f32_f32_len, matmul_f32_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_l, "matmul_f32_aligned_l", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_m, "matmul_f32_aligned_m", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_s, "matmul_f32_aligned_s", matmul_f32_f32_aligned_len, matmul_f32_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->l, "matmul_f32_f16_l", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->m, "matmul_f32_f16_m", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->s, "matmul_f32_f16_s", matmul_f32_f16_len, matmul_f32_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_l, "matmul_f32_f16_aligned_l", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_m, "matmul_f32_f16_aligned_m", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_s, "matmul_f32_f16_aligned_s", matmul_f32_f16_aligned_len, matmul_f32_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->l, "matmul_f16_l", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->m, "matmul_f16_m", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->s, "matmul_f16_s", matmul_f16_len, matmul_f16_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_l, "matmul_f16_aligned_l", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_m, "matmul_f16_aligned_m", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_s, "matmul_f16_aligned_s", matmul_f16_aligned_len, matmul_f16_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->l, "matmul_f16_f32_l", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->m, "matmul_f16_f32_m", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->s, "matmul_f16_f32_s", matmul_f16_f32_len, matmul_f16_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_l, "matmul_f16_f32_aligned_l", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_m, "matmul_f16_f32_aligned_m", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_s, "matmul_f16_f32_aligned_s", matmul_f16_f32_aligned_len, matmul_f16_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->l, "matmul_q4_0_f32_l", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->m, "matmul_q4_0_f32_m", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->s, "matmul_q4_0_f32_s", matmul_q4_0_f32_len, matmul_q4_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_l, "matmul_q4_0_f32_aligned_l", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_m, "matmul_q4_0_f32_aligned_m", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_s, "matmul_q4_0_f32_aligned_s", matmul_q4_0_f32_aligned_len, matmul_q4_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->l, "matmul_q4_1_f32_l", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->m, "matmul_q4_1_f32_m", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->s, "matmul_q4_1_f32_s", matmul_q4_1_f32_len, matmul_q4_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_l, "matmul_q4_1_f32_aligned_l", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_m, "matmul_q4_1_f32_aligned_m", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_s, "matmul_q4_1_f32_aligned_s", matmul_q4_1_f32_aligned_len, matmul_q4_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->l, "matmul_q5_0_f32_l", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->m, "matmul_q5_0_f32_m", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->s, "matmul_q5_0_f32_s", matmul_q5_0_f32_len, matmul_q5_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_l, "matmul_q5_0_f32_aligned_l", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_m, "matmul_q5_0_f32_aligned_m", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_s, "matmul_q5_0_f32_aligned_s", matmul_q5_0_f32_aligned_len, matmul_q5_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->l, "matmul_q5_1_f32_l", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->m, "matmul_q5_1_f32_m", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->s, "matmul_q5_1_f32_s", matmul_q5_1_f32_len, matmul_q5_1_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_l, "matmul_q5_1_f32_aligned_l", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_m, "matmul_q5_1_f32_aligned_m", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_s, "matmul_q5_1_f32_aligned_s", matmul_q5_1_f32_aligned_len, matmul_q5_1_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->l, "matmul_q8_0_f32_l", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->m, "matmul_q8_0_f32_m", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->s, "matmul_q8_0_f32_s", matmul_q8_0_f32_len, matmul_q8_0_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_l, "matmul_q8_0_f32_aligned_l", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_m, "matmul_q8_0_f32_aligned_m", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_s, "matmul_q8_0_f32_aligned_s", matmul_q8_0_f32_aligned_len, matmul_q8_0_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->l, "matmul_q2_k_f32_l", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->m, "matmul_q2_k_f32_m", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->s, "matmul_q2_k_f32_s", matmul_q2_k_f32_len, matmul_q2_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_l, "matmul_q2_k_f32_aligned_l", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_m, "matmul_q2_k_f32_aligned_m", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_s, "matmul_q2_k_f32_aligned_s", matmul_q2_k_f32_aligned_len, matmul_q2_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->l, "matmul_q3_k_f32_l", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->m, "matmul_q3_k_f32_m", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->s, "matmul_q3_k_f32_s", matmul_q3_k_f32_len, matmul_q3_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_l, "matmul_q3_k_f32_aligned_l", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_m, "matmul_q3_k_f32_aligned_m", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_s, "matmul_q3_k_f32_aligned_s", matmul_q3_k_f32_aligned_len, matmul_q3_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->l, "matmul_q4_k_f32_l", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->m, "matmul_q4_k_f32_m", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->s, "matmul_q4_k_f32_s", matmul_q4_k_f32_len, matmul_q4_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_l, "matmul_q4_k_f32_aligned_l", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_m, "matmul_q4_k_f32_aligned_m", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_s, "matmul_q4_k_f32_aligned_s", matmul_q4_k_f32_aligned_len, matmul_q4_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->l, "matmul_q5_k_f32_l", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->m, "matmul_q5_k_f32_m", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->s, "matmul_q5_k_f32_s", matmul_q5_k_f32_len, matmul_q5_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_l, "matmul_q5_k_f32_aligned_l", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_m, "matmul_q5_k_f32_aligned_m", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_s, "matmul_q5_k_f32_aligned_s", matmul_q5_k_f32_aligned_len, matmul_q5_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->l, "matmul_q6_k_f32_l", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->m, "matmul_q6_k_f32_m", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->s, "matmul_q6_k_f32_s", matmul_q6_k_f32_len, matmul_q6_k_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_l, "matmul_q6_k_f32_aligned_l", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_m, "matmul_q6_k_f32_aligned_m", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_s, "matmul_q6_k_f32_aligned_s", matmul_q6_k_f32_aligned_len, matmul_q6_k_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->l, "matmul_iq4_nl_f32_l", matmul_iq4_nl_f32_len, matmul_iq4_nl_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->m, "matmul_iq4_nl_f32_m", matmul_iq4_nl_f32_len, matmul_iq4_nl_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->s, "matmul_iq4_nl_f32_s", matmul_iq4_nl_f32_len, matmul_iq4_nl_f32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_l, "matmul_iq4_nl_f32_aligned_l", matmul_iq4_nl_f32_aligned_len, matmul_iq4_nl_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_m, "matmul_iq4_nl_f32_aligned_m", matmul_iq4_nl_f32_aligned_len, matmul_iq4_nl_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_s, "matmul_iq4_nl_f32_aligned_s", matmul_iq4_nl_f32_aligned_len, matmul_iq4_nl_f32_aligned_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->l, "matmul_id_f32_l", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->m, "matmul_id_f32_m", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->s, "matmul_id_f32_s", matmul_id_f32_f32_len, matmul_id_f32_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_l, "matmul_id_f32_aligned_l", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_m, "matmul_id_f32_aligned_m", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_s, "matmul_id_f32_aligned_s", matmul_id_f32_f32_aligned_len, matmul_id_f32_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->l, "matmul_id_f16_l", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->m, "matmul_id_f16_m", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->s, "matmul_id_f16_s", matmul_id_f16_len, matmul_id_f16_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_l, "matmul_id_f16_aligned_l", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_m, "matmul_id_f16_aligned_m", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_s, "matmul_id_f16_aligned_s", matmul_id_f16_aligned_len, matmul_id_f16_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->l, "matmul_id_f16_f32_l", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->m, "matmul_id_f16_f32_m", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->s, "matmul_id_f16_f32_s", matmul_id_f16_f32_len, matmul_id_f16_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_l, "matmul_id_f16_f32_aligned_l", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_m, "matmul_id_f16_f32_aligned_m", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_s, "matmul_id_f16_f32_aligned_s", matmul_id_f16_f32_aligned_len, matmul_id_f16_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->l, "matmul_id_q4_0_f32_l", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->m, "matmul_id_q4_0_f32_m", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->s, "matmul_id_q4_0_f32_s", matmul_id_q4_0_f32_len, matmul_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_l, "matmul_id_q4_0_f32_aligned_l", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_m, "matmul_id_q4_0_f32_aligned_m", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_s, "matmul_id_q4_0_f32_aligned_s", matmul_id_q4_0_f32_aligned_len, matmul_id_q4_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->l, "matmul_id_q4_1_f32_l", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->m, "matmul_id_q4_1_f32_m", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->s, "matmul_id_q4_1_f32_s", matmul_id_q4_1_f32_len, matmul_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_l, "matmul_id_q4_1_f32_aligned_l", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_m, "matmul_id_q4_1_f32_aligned_m", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_s, "matmul_id_q4_1_f32_aligned_s", matmul_id_q4_1_f32_aligned_len, matmul_id_q4_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->l, "matmul_id_q5_0_f32_l", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->m, "matmul_id_q5_0_f32_m", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->s, "matmul_id_q5_0_f32_s", matmul_id_q5_0_f32_len, matmul_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_l, "matmul_id_q5_0_f32_aligned_l", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_m, "matmul_id_q5_0_f32_aligned_m", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_s, "matmul_id_q5_0_f32_aligned_s", matmul_id_q5_0_f32_aligned_len, matmul_id_q5_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->l, "matmul_id_q5_1_f32_l", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->m, "matmul_id_q5_1_f32_m", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->s, "matmul_id_q5_1_f32_s", matmul_id_q5_1_f32_len, matmul_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_l, "matmul_id_q5_1_f32_aligned_l", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_m, "matmul_id_q5_1_f32_aligned_m", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_s, "matmul_id_q5_1_f32_aligned_s", matmul_id_q5_1_f32_aligned_len, matmul_id_q5_1_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->l, "matmul_id_q8_0_f32_l", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->m, "matmul_id_q8_0_f32_m", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->s, "matmul_id_q8_0_f32_s", matmul_id_q8_0_f32_len, matmul_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_l, "matmul_id_q8_0_f32_aligned_l", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_m, "matmul_id_q8_0_f32_aligned_m", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_s, "matmul_id_q8_0_f32_aligned_s", matmul_id_q8_0_f32_aligned_len, matmul_id_q8_0_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->l, "matmul_id_q2_k_f32_l", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->m, "matmul_id_q2_k_f32_m", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->s, "matmul_id_q2_k_f32_s", matmul_id_q2_k_f32_len, matmul_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_l, "matmul_id_q2_k_f32_aligned_l", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_m, "matmul_id_q2_k_f32_aligned_m", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_s, "matmul_id_q2_k_f32_aligned_s", matmul_id_q2_k_f32_aligned_len, matmul_id_q2_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->l, "matmul_id_q3_k_f32_l", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->m, "matmul_id_q3_k_f32_m", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->s, "matmul_id_q3_k_f32_s", matmul_id_q3_k_f32_len, matmul_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_l, "matmul_id_q3_k_f32_aligned_l", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_m, "matmul_id_q3_k_f32_aligned_m", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_s, "matmul_id_q3_k_f32_aligned_s", matmul_id_q3_k_f32_aligned_len, matmul_id_q3_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->l, "matmul_id_q4_k_f32_l", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->m, "matmul_id_q4_k_f32_m", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->s, "matmul_id_q4_k_f32_s", matmul_id_q4_k_f32_len, matmul_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1192. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_l, "matmul_id_q4_k_f32_aligned_l", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1193. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_m, "matmul_id_q4_k_f32_aligned_m", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1194. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_s, "matmul_id_q4_k_f32_aligned_s", matmul_id_q4_k_f32_aligned_len, matmul_id_q4_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1195. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->l, "matmul_id_q5_k_f32_l", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1196. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->m, "matmul_id_q5_k_f32_m", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1197. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->s, "matmul_id_q5_k_f32_s", matmul_id_q5_k_f32_len, matmul_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1198. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_l, "matmul_id_q5_k_f32_aligned_l", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1199. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_m, "matmul_id_q5_k_f32_aligned_m", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1200. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_s, "matmul_id_q5_k_f32_aligned_s", matmul_id_q5_k_f32_aligned_len, matmul_id_q5_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1201. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->l, "matmul_id_q6_k_f32_l", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1202. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->m, "matmul_id_q6_k_f32_m", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1203. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->s, "matmul_id_q6_k_f32_s", matmul_id_q6_k_f32_len, matmul_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1204. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_l, "matmul_id_q6_k_f32_aligned_l", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1205. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_m, "matmul_id_q6_k_f32_aligned_m", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1206. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_s, "matmul_id_q6_k_f32_aligned_s", matmul_id_q6_k_f32_aligned_len, matmul_id_q6_k_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1207. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->l, "matmul_id_iq4_nl_f32_l", matmul_id_iq4_nl_f32_len, matmul_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1208. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->m, "matmul_id_iq4_nl_f32_m", matmul_id_iq4_nl_f32_len, matmul_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1209. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->s, "matmul_id_iq4_nl_f32_s", matmul_id_iq4_nl_f32_len, matmul_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1210. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_l, "matmul_id_iq4_nl_f32_aligned_l", matmul_id_iq4_nl_f32_aligned_len, matmul_id_iq4_nl_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
  1211. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_m, "matmul_id_iq4_nl_f32_aligned_m", matmul_id_iq4_nl_f32_aligned_len, matmul_id_iq4_nl_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
  1212. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_s, "matmul_id_iq4_nl_f32_aligned_s", matmul_id_iq4_nl_f32_aligned_len, matmul_id_iq4_nl_f32_aligned_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
  1213. } else {
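        // Fall-back branch: rebuilds the same pipeline set from the *_fp32 SPIR-V
        // variants (note the *_fp32_len/_data blobs below); presumably taken when the
        // fp16 shader path above is not usable on this device.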
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->l, "matmul_f32_l", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->m, "matmul_f32_m", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->s, "matmul_f32_s", matmul_f32_f32_fp32_len, matmul_f32_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_l, "matmul_f32_aligned_l", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_m, "matmul_f32_aligned_m", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32->a_s, "matmul_f32_aligned_s", matmul_f32_f32_aligned_fp32_len, matmul_f32_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->l, "matmul_f32_f16_l", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->m, "matmul_f32_f16_m", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->s, "matmul_f32_f16_s", matmul_f32_f16_fp32_len, matmul_f32_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_l, "matmul_f32_f16_aligned_l", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_m, "matmul_f32_f16_aligned_m", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f32_f16->a_s, "matmul_f32_f16_aligned_s", matmul_f32_f16_aligned_fp32_len, matmul_f32_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->l, "matmul_f16_l", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->m, "matmul_f16_m", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->s, "matmul_f16_s", matmul_f16_fp32_len, matmul_f16_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_l, "matmul_f16_aligned_l", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_m, "matmul_f16_aligned_m", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16->a_s, "matmul_f16_aligned_s", matmul_f16_aligned_fp32_len, matmul_f16_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->l, "matmul_f16_f32_l", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->m, "matmul_f16_f32_m", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->s, "matmul_f16_f32_s", matmul_f16_f32_fp32_len, matmul_f16_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_l, "matmul_f16_f32_aligned_l", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_m, "matmul_f16_f32_aligned_m", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_f16_f32->a_s, "matmul_f16_f32_aligned_s", matmul_f16_f32_aligned_fp32_len, matmul_f16_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_s, s_align);
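        // Dequantizing matmul pipelines (fp32 variants): for each quantization type,
        // an l/m/s workgroup-size trio plus the corresponding aligned variants.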
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->l, "matmul_q4_0_f32_l", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->m, "matmul_q4_0_f32_m", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->s, "matmul_q4_0_f32_s", matmul_q4_0_f32_fp32_len, matmul_q4_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_l, "matmul_q4_0_f32_aligned_l", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_m, "matmul_q4_0_f32_aligned_m", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0]->a_s, "matmul_q4_0_f32_aligned_s", matmul_q4_0_f32_aligned_fp32_len, matmul_q4_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->l, "matmul_q4_1_f32_l", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->m, "matmul_q4_1_f32_m", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->s, "matmul_q4_1_f32_s", matmul_q4_1_f32_fp32_len, matmul_q4_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_l, "matmul_q4_1_f32_aligned_l", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_m, "matmul_q4_1_f32_aligned_m", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1]->a_s, "matmul_q4_1_f32_aligned_s", matmul_q4_1_f32_aligned_fp32_len, matmul_q4_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->l, "matmul_q5_0_f32_l", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->m, "matmul_q5_0_f32_m", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->s, "matmul_q5_0_f32_s", matmul_q5_0_f32_fp32_len, matmul_q5_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_l, "matmul_q5_0_f32_aligned_l", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_m, "matmul_q5_0_f32_aligned_m", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0]->a_s, "matmul_q5_0_f32_aligned_s", matmul_q5_0_f32_aligned_fp32_len, matmul_q5_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->l, "matmul_q5_1_f32_l", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->m, "matmul_q5_1_f32_m", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->s, "matmul_q5_1_f32_s", matmul_q5_1_f32_fp32_len, matmul_q5_1_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_l, "matmul_q5_1_f32_aligned_l", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_m, "matmul_q5_1_f32_aligned_m", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1]->a_s, "matmul_q5_1_f32_aligned_s", matmul_q5_1_f32_aligned_fp32_len, matmul_q5_1_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->l, "matmul_q8_0_f32_l", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->m, "matmul_q8_0_f32_m", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->s, "matmul_q8_0_f32_s", matmul_q8_0_f32_fp32_len, matmul_q8_0_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_l, "matmul_q8_0_f32_aligned_l", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_m, "matmul_q8_0_f32_aligned_m", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0]->a_s, "matmul_q8_0_f32_aligned_s", matmul_q8_0_f32_aligned_fp32_len, matmul_q8_0_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->l, "matmul_q2_k_f32_l", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->m, "matmul_q2_k_f32_m", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->s, "matmul_q2_k_f32_s", matmul_q2_k_f32_fp32_len, matmul_q2_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_l, "matmul_q2_k_f32_aligned_l", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_m, "matmul_q2_k_f32_aligned_m", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K]->a_s, "matmul_q2_k_f32_aligned_s", matmul_q2_k_f32_aligned_fp32_len, matmul_q2_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->l, "matmul_q3_k_f32_l", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->m, "matmul_q3_k_f32_m", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->s, "matmul_q3_k_f32_s", matmul_q3_k_f32_fp32_len, matmul_q3_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_l, "matmul_q3_k_f32_aligned_l", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_m, "matmul_q3_k_f32_aligned_m", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K]->a_s, "matmul_q3_k_f32_aligned_s", matmul_q3_k_f32_aligned_fp32_len, matmul_q3_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->l, "matmul_q4_k_f32_l", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->m, "matmul_q4_k_f32_m", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->s, "matmul_q4_k_f32_s", matmul_q4_k_f32_fp32_len, matmul_q4_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_l, "matmul_q4_k_f32_aligned_l", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_m, "matmul_q4_k_f32_aligned_m", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K]->a_s, "matmul_q4_k_f32_aligned_s", matmul_q4_k_f32_aligned_fp32_len, matmul_q4_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->l, "matmul_q5_k_f32_l", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->m, "matmul_q5_k_f32_m", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->s, "matmul_q5_k_f32_s", matmul_q5_k_f32_fp32_len, matmul_q5_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_l, "matmul_q5_k_f32_aligned_l", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_m, "matmul_q5_k_f32_aligned_m", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K]->a_s, "matmul_q5_k_f32_aligned_s", matmul_q5_k_f32_aligned_fp32_len, matmul_q5_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->l, "matmul_q6_k_f32_l", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->m, "matmul_q6_k_f32_m", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->s, "matmul_q6_k_f32_s", matmul_q6_k_f32_fp32_len, matmul_q6_k_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_l, "matmul_q6_k_f32_aligned_l", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_m, "matmul_q6_k_f32_aligned_m", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K]->a_s, "matmul_q6_k_f32_aligned_s", matmul_q6_k_f32_aligned_fp32_len, matmul_q6_k_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->l, "matmul_iq4_nl_f32_l", matmul_iq4_nl_f32_fp32_len, matmul_iq4_nl_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->m, "matmul_iq4_nl_f32_m", matmul_iq4_nl_f32_fp32_len, matmul_iq4_nl_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->s, "matmul_iq4_nl_f32_s", matmul_iq4_nl_f32_fp32_len, matmul_iq4_nl_f32_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_l, "matmul_iq4_nl_f32_aligned_l", matmul_iq4_nl_f32_aligned_fp32_len, matmul_iq4_nl_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_m, "matmul_iq4_nl_f32_aligned_m", matmul_iq4_nl_f32_aligned_fp32_len, matmul_iq4_nl_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL]->a_s, "matmul_iq4_nl_f32_aligned_s", matmul_iq4_nl_f32_aligned_fp32_len, matmul_iq4_nl_f32_aligned_fp32_data, "main", 3, sizeof(vk_mat_mat_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
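        // mul_mat_id pipelines (fp32 variants): same l/m/s + aligned scheme, but with a
        // 4-descriptor layout and vk_mat_mat_id_push_constants for the expert-id buffer.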
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->l, "matmul_id_f32_l", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->m, "matmul_id_f32_m", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->s, "matmul_id_f32_s", matmul_id_f32_f32_fp32_len, matmul_id_f32_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_l, "matmul_id_f32_aligned_l", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_m, "matmul_id_f32_aligned_m", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f32->a_s, "matmul_id_f32_aligned_s", matmul_id_f32_f32_aligned_fp32_len, matmul_id_f32_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->l, "matmul_id_f16_l", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->m, "matmul_id_f16_m", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->s, "matmul_id_f16_s", matmul_id_f16_fp32_len, matmul_id_f16_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_l, "matmul_id_f16_aligned_l", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_m, "matmul_id_f16_aligned_m", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16->a_s, "matmul_id_f16_aligned_s", matmul_id_f16_aligned_fp32_len, matmul_id_f16_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->l, "matmul_id_f16_f32_l", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->m, "matmul_id_f16_f32_m", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->s, "matmul_id_f16_f32_s", matmul_id_f16_f32_fp32_len, matmul_id_f16_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, 1);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_l, "matmul_id_f16_f32_aligned_l", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_m, "matmul_id_f16_f32_aligned_m", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_matmul_id_f16_f32->a_s, "matmul_id_f16_f32_aligned_s", matmul_id_f16_f32_aligned_fp32_len, matmul_id_f16_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->l, "matmul_id_q4_0_f32_l", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->m, "matmul_id_q4_0_f32_m", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->s, "matmul_id_q4_0_f32_s", matmul_id_q4_0_f32_fp32_len, matmul_id_q4_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_l, "matmul_id_q4_0_f32_aligned_l", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_m, "matmul_id_q4_0_f32_aligned_m", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0]->a_s, "matmul_id_q4_0_f32_aligned_s", matmul_id_q4_0_f32_aligned_fp32_len, matmul_id_q4_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->l, "matmul_id_q4_1_f32_l", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->m, "matmul_id_q4_1_f32_m", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->s, "matmul_id_q4_1_f32_s", matmul_id_q4_1_f32_fp32_len, matmul_id_q4_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_l, "matmul_id_q4_1_f32_aligned_l", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_m, "matmul_id_q4_1_f32_aligned_m", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1]->a_s, "matmul_id_q4_1_f32_aligned_s", matmul_id_q4_1_f32_aligned_fp32_len, matmul_id_q4_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->l, "matmul_id_q5_0_f32_l", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->m, "matmul_id_q5_0_f32_m", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->s, "matmul_id_q5_0_f32_s", matmul_id_q5_0_f32_fp32_len, matmul_id_q5_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_l, "matmul_id_q5_0_f32_aligned_l", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_m, "matmul_id_q5_0_f32_aligned_m", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0]->a_s, "matmul_id_q5_0_f32_aligned_s", matmul_id_q5_0_f32_aligned_fp32_len, matmul_id_q5_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->l, "matmul_id_q5_1_f32_l", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->m, "matmul_id_q5_1_f32_m", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->s, "matmul_id_q5_1_f32_s", matmul_id_q5_1_f32_fp32_len, matmul_id_q5_1_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_l, "matmul_id_q5_1_f32_aligned_l", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_m, "matmul_id_q5_1_f32_aligned_m", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1]->a_s, "matmul_id_q5_1_f32_aligned_s", matmul_id_q5_1_f32_aligned_fp32_len, matmul_id_q5_1_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->l, "matmul_id_q8_0_f32_l", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->m, "matmul_id_q8_0_f32_m", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->s, "matmul_id_q8_0_f32_s", matmul_id_q8_0_f32_fp32_len, matmul_id_q8_0_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_l, "matmul_id_q8_0_f32_aligned_l", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_m, "matmul_id_q8_0_f32_aligned_m", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0]->a_s, "matmul_id_q8_0_f32_aligned_s", matmul_id_q8_0_f32_aligned_fp32_len, matmul_id_q8_0_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->l, "matmul_id_q2_k_f32_l", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->m, "matmul_id_q2_k_f32_m", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->s, "matmul_id_q2_k_f32_s", matmul_id_q2_k_f32_fp32_len, matmul_id_q2_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_l, "matmul_id_q2_k_f32_aligned_l", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_m, "matmul_id_q2_k_f32_aligned_m", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K]->a_s, "matmul_id_q2_k_f32_aligned_s", matmul_id_q2_k_f32_aligned_fp32_len, matmul_id_q2_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->l, "matmul_id_q3_k_f32_l", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->m, "matmul_id_q3_k_f32_m", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->s, "matmul_id_q3_k_f32_s", matmul_id_q3_k_f32_fp32_len, matmul_id_q3_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_l, "matmul_id_q3_k_f32_aligned_l", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_m, "matmul_id_q3_k_f32_aligned_m", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K]->a_s, "matmul_id_q3_k_f32_aligned_s", matmul_id_q3_k_f32_aligned_fp32_len, matmul_id_q3_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->l, "matmul_id_q4_k_f32_l", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->m, "matmul_id_q4_k_f32_m", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->s, "matmul_id_q4_k_f32_s", matmul_id_q4_k_f32_fp32_len, matmul_id_q4_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_l, "matmul_id_q4_k_f32_aligned_l", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_m, "matmul_id_q4_k_f32_aligned_m", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K]->a_s, "matmul_id_q4_k_f32_aligned_s", matmul_id_q4_k_f32_aligned_fp32_len, matmul_id_q4_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->l, "matmul_id_q5_k_f32_l", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->m, "matmul_id_q5_k_f32_m", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->s, "matmul_id_q5_k_f32_s", matmul_id_q5_k_f32_fp32_len, matmul_id_q5_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_l, "matmul_id_q5_k_f32_aligned_l", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_m, "matmul_id_q5_k_f32_aligned_m", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K]->a_s, "matmul_id_q5_k_f32_aligned_s", matmul_id_q5_k_f32_aligned_fp32_len, matmul_id_q5_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->l, "matmul_id_q6_k_f32_l", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->m, "matmul_id_q6_k_f32_m", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->s, "matmul_id_q6_k_f32_s", matmul_id_q6_k_f32_fp32_len, matmul_id_q6_k_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_l, "matmul_id_q6_k_f32_aligned_l", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_m, "matmul_id_q6_k_f32_aligned_m", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K]->a_s, "matmul_id_q6_k_f32_aligned_s", matmul_id_q6_k_f32_aligned_fp32_len, matmul_id_q6_k_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->l, "matmul_id_iq4_nl_f32_l", matmul_id_iq4_nl_f32_fp32_len, matmul_id_iq4_nl_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->m, "matmul_id_iq4_nl_f32_m", matmul_id_iq4_nl_f32_fp32_len, matmul_id_iq4_nl_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->s, "matmul_id_iq4_nl_f32_s", matmul_id_iq4_nl_f32_fp32_len, matmul_id_iq4_nl_f32_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_l, "matmul_id_iq4_nl_f32_aligned_l", matmul_id_iq4_nl_f32_aligned_fp32_len, matmul_id_iq4_nl_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), l_wg_denoms, warptile_mmq_l, l_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_m, "matmul_id_iq4_nl_f32_aligned_m", matmul_id_iq4_nl_f32_aligned_fp32_len, matmul_id_iq4_nl_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), m_wg_denoms, warptile_mmq_m, m_align);
        ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL]->a_s, "matmul_id_iq4_nl_f32_aligned_s", matmul_id_iq4_nl_f32_aligned_fp32_len, matmul_id_iq4_nl_f32_aligned_fp32_data, "main", 4, sizeof(vk_mat_mat_id_push_constants), s_wg_denoms, warptile_mmq_s, s_align);
    }
    // mul mat vec
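    // One pipeline per source type, for f32 and f16 src1; the workgroup size is passed
    // as a specialization constant set to the device subgroup size.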
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f32_f32", mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f32_f32", mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f32_f32", mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f16_f32", mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f16_f32", mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1406. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1407. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1408. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1409. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1410. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1411. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1412. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1413. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1414. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1415. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f16_f32", mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1416. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1417. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1418. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1419. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1420. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1421. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1422. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1423. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1424. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1425. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1426. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1427. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
  1428. ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    // dequant shaders
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_0], "dequant_q4_0", dequant_q4_0_len, dequant_q4_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_1], "dequant_q4_1", dequant_q4_1_len, dequant_q4_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_0], "dequant_q5_0", dequant_q5_0_len, dequant_q5_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_1], "dequant_q5_1", dequant_q5_1_len, dequant_q5_1_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q8_0], "dequant_q8_0", dequant_q8_0_len, dequant_q8_0_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q2_K], "dequant_q2_k", dequant_q2_k_len, dequant_q2_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q3_K], "dequant_q3_k", dequant_q3_k_len, dequant_q3_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_K], "dequant_q4_k", dequant_q4_k_len, dequant_q4_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_K], "dequant_q5_k", dequant_q5_k_len, dequant_q5_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q6_K], "dequant_q6_k", dequant_q6_k_len, dequant_q6_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_NL], "dequant_iq4_nl", dequant_iq4_nl_len, dequant_iq4_nl_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
    // get_rows
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F32 ], "get_rows_f32", get_rows_f32_len, get_rows_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F16 ], "get_rows_f16", get_rows_f16_len, get_rows_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_0], "get_rows_q4_0", get_rows_q4_0_len, get_rows_q4_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q4_1], "get_rows_q4_1", get_rows_q4_1_len, get_rows_q4_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_0], "get_rows_q5_0", get_rows_q5_0_len, get_rows_q5_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_1], "get_rows_q5_1", get_rows_q5_1_len, get_rows_q5_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q8_0], "get_rows_q8_0", get_rows_q8_0_len, get_rows_q8_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl", get_rows_iq4_nl_len, get_rows_iq4_nl_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f32_f32", get_rows_f32_f32_len, get_rows_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F16 ], "get_rows_f16_f32", get_rows_f16_f32_len, get_rows_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_0], "get_rows_q4_0_f32", get_rows_q4_0_f32_len, get_rows_q4_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q4_1], "get_rows_q4_1_f32", get_rows_q4_1_f32_len, get_rows_q4_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_0], "get_rows_q5_0_f32", get_rows_q5_0_f32_len, get_rows_q5_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_1], "get_rows_q5_1_f32", get_rows_q5_1_f32_len, get_rows_q5_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_p021_f16_f32, "mul_mat_vec_p021_f16_f32", mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 3, 7 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f32, "cpy_f32_f32", cpy_f32_f32_len, cpy_f32_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_f16, "cpy_f32_f16", cpy_f32_f16_len, cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cpy_f16_f16, "cpy_f16_f16", cpy_f16_f16_len, cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_add_f32, "add_f32", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_add_f16_f32_f16, "add_f16_f32_f16", add_f16_f32_f16_len, add_f16_f32_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_acc_f32, "acc_f32", acc_f32_len, acc_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_mul_f32, "mul_f32", mul_f32_len, mul_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_div_f32, "div_f32", div_f32_len, div_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_concat_f32, "concat_f32", concat_f32_len, concat_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_concat_f16, "concat_f16", concat_f16_len, concat_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_concat_i32, "concat_i32", concat_i32_len, concat_i32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_upscale_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_clamp_f32, "clamp_f32", clamp_f32_len, clamp_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_pad_f32, "pad_f32", pad_f32_len, pad_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_repeat_f32, "repeat_f32", repeat_f32_len, repeat_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_gelu_f32, "gelu_f32", gelu_f32_len, gelu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_gelu_quick_f32, "gelu_quick_f32", gelu_quick_f32_len, gelu_quick_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_silu_f32, "silu_f32", silu_f32_len, silu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_relu_f32, "relu_f32", relu_f32_len, relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_leaky_relu_f32, "leaky_relu_f32", leaky_relu_f32_len, leaky_relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_tanh_f32, "tanh_f32", tanh_f32_len, tanh_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {512, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32, "rope_norm_f32", rope_norm_f32_len, rope_norm_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32, "rope_neox_f32", rope_neox_f32_len, rope_neox_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, rope_neox_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
    ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1);
    ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1);
    for (auto &c : compiles) {
        c.wait();
    }
}
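
// Lazily initializes and caches the vk_device for the given backend index:
// picks the physical device, queries its limits and features (fp16 support,
// subgroup size, max allocation size), creates the logical device with
// dedicated compute/transfer queues where available, and compiles all
// compute pipelines via ggml_vk_load_shaders().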
static vk_device ggml_vk_get_device(size_t idx) {
    VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")");

    if (vk_instance.devices[idx] == nullptr) {
        VK_LOG_DEBUG("Initializing new vk_device");

        vk_device device = std::make_shared<vk_device_struct>();
        vk_instance.devices[idx] = device;

#ifdef GGML_VULKAN_MEMORY_DEBUG
        device->memory_logger = std::unique_ptr<vk_memory_logger>(new vk_memory_logger());
#endif
#ifdef GGML_VULKAN_PERF
        device->perf_logger = std::unique_ptr<vk_perf_logger>(new vk_perf_logger());
#endif

        size_t dev_num = vk_instance.device_indices[idx];

        std::vector<vk::PhysicalDevice> physical_devices = vk_instance.instance.enumeratePhysicalDevices();

        if (dev_num >= physical_devices.size()) {
            std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
            throw std::runtime_error("Device not found");
        }

        device->physical_device = physical_devices[dev_num];
        const std::vector<vk::ExtensionProperties> ext_props = device->physical_device.enumerateDeviceExtensionProperties();

        bool maintenance4_support = false;

        // Check if maintenance4 is supported
        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) {
                maintenance4_support = true;
            }
        }

        vk::PhysicalDeviceProperties2 props2;
        vk::PhysicalDeviceMaintenance3Properties props3;
        vk::PhysicalDeviceMaintenance4Properties props4;
        vk::PhysicalDeviceSubgroupProperties subgroup_props;
        props2.pNext = &props3;
        props3.pNext = &subgroup_props;
        if (maintenance4_support) {
            subgroup_props.pNext = &props4;
        }
        device->physical_device.getProperties2(&props2);
        device->properties = props2.properties;

        const char* GGML_VK_FORCE_MAX_ALLOCATION_SIZE = getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE");

        if (GGML_VK_FORCE_MAX_ALLOCATION_SIZE != nullptr) {
            device->max_memory_allocation_size = std::stoi(GGML_VK_FORCE_MAX_ALLOCATION_SIZE);
        } else if (maintenance4_support) {
            device->max_memory_allocation_size = std::min(props3.maxMemoryAllocationSize, props4.maxBufferSize);
        } else {
            device->max_memory_allocation_size = props3.maxMemoryAllocationSize;
        }

        device->vendor_id = device->properties.vendorID;
        device->subgroup_size = subgroup_props.subgroupSize;
        device->uma = device->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

        bool fp16_storage = false;
        bool fp16_compute = false;

        for (const auto& properties : ext_props) {
            if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
                fp16_storage = true;
            } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
                fp16_compute = true;
            }
        }

        const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
        const bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;

        device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

        std::vector<vk::QueueFamilyProperties> queue_family_props = device->physical_device.getQueueFamilyProperties();

        // Try to find a non-graphics compute queue and transfer-focused queues
        const uint32_t compute_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eCompute, vk::QueueFlagBits::eGraphics, -1, 1);
        const uint32_t transfer_queue_family_index = ggml_vk_find_queue_family_index(queue_family_props, vk::QueueFlagBits::eTransfer, vk::QueueFlagBits::eCompute | vk::QueueFlagBits::eGraphics, compute_queue_family_index, 1);

        const float priorities[] = { 1.0f, 1.0f };
        device->single_queue = compute_queue_family_index == transfer_queue_family_index && queue_family_props[compute_queue_family_index].queueCount == 1;

        std::vector<vk::DeviceQueueCreateInfo> device_queue_create_infos;
        if (compute_queue_family_index != transfer_queue_family_index) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), transfer_queue_family_index, 1, priorities + 1});
        } else if (!device->single_queue) {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 2, priorities});
        } else {
            device_queue_create_infos.push_back({vk::DeviceQueueCreateFlags(), compute_queue_family_index, 1, priorities});
        }
        vk::DeviceCreateInfo device_create_info;
        std::vector<const char *> device_extensions;
        vk::PhysicalDeviceFeatures device_features = device->physical_device.getFeatures();

        VkPhysicalDeviceFeatures2 device_features2;
        device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        device_features2.pNext = nullptr;
        device_features2.features = (VkPhysicalDeviceFeatures)device_features;

        VkPhysicalDeviceVulkan11Features vk11_features;
        vk11_features.pNext = nullptr;
        vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
        device_features2.pNext = &vk11_features;

        VkPhysicalDeviceVulkan12Features vk12_features;
        vk12_features.pNext = nullptr;
        vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
        vk11_features.pNext = &vk12_features;
        vkGetPhysicalDeviceFeatures2(device->physical_device, &device_features2);

        device->fp16 = device->fp16 && vk12_features.shaderFloat16;

        if (!vk11_features.storageBuffer16BitAccess) {
            std::cerr << "ggml_vulkan: device " << GGML_VK_NAME << idx << " does not support 16-bit storage." << std::endl;
            throw std::runtime_error("Unsupported device");
        }

        device_extensions.push_back("VK_KHR_16bit_storage");

#ifdef GGML_VULKAN_VALIDATE
        device_extensions.push_back("VK_KHR_shader_non_semantic_info");
#endif

        if (device->fp16) {
            device_extensions.push_back("VK_KHR_shader_float16_int8");
        }
        device->name = GGML_VK_NAME + std::to_string(idx);

        device_create_info = {
            vk::DeviceCreateFlags(),
            device_queue_create_infos,
            {},
            device_extensions
        };
        device_create_info.setPNext(&device_features2);
        device->device = device->physical_device.createDevice(device_create_info);

        // Queues
        ggml_vk_create_queue(device, device->compute_queue, compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader | vk::PipelineStageFlagBits::eTransfer }, false);

        // Shaders
        ggml_vk_load_shaders(device);

        if (!device->single_queue) {
            const uint32_t transfer_queue_index = compute_queue_family_index == transfer_queue_family_index ? 1 : 0;
            ggml_vk_create_queue(device, device->transfer_queue, transfer_queue_family_index, transfer_queue_index, { vk::PipelineStageFlagBits::eTransfer }, true);
        } else {
            // TODO: Use pointer or reference to avoid copy
            device->transfer_queue = device->compute_queue;
        }

        device->buffer_type = {
            /* .iface   = */ ggml_backend_vk_buffer_type_interface,
            /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), idx),
            /* .context = */ new ggml_backend_vk_buffer_type_context{ device->name, device },
        };

        device->fence = device->device.createFence({});

        device->idx = idx;

        return device;
    }

    return vk_instance.devices[idx];
}
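
// Prints a one-line summary (name, driver, UMA, fp16 support, subgroup size)
// for the device at the given index, and warns if it is a CPU implementation.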
static void ggml_vk_print_gpu_info(size_t idx) {
    GGML_ASSERT(idx < vk_instance.device_indices.size());
    size_t dev_num = vk_instance.device_indices[idx];
    VK_LOG_DEBUG("ggml_vk_print_gpu_info(" << dev_num << ")");
    GGML_ASSERT(vk_instance_initialized);

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    if (dev_num >= devices.size()) {
        std::cerr << "ggml_vulkan: Device with index " << dev_num << " does not exist." << std::endl;
        throw std::runtime_error("Device not found");
    }

    vk::PhysicalDevice physical_device = devices[dev_num];
    std::vector<vk::ExtensionProperties> ext_props = physical_device.enumerateDeviceExtensionProperties();

    vk::PhysicalDeviceProperties2 props2;
    vk::PhysicalDeviceMaintenance3Properties props3;
    vk::PhysicalDeviceSubgroupProperties subgroup_props;
    vk::PhysicalDeviceDriverProperties driver_props;
    props2.pNext = &props3;
    props3.pNext = &subgroup_props;
    subgroup_props.pNext = &driver_props;
    physical_device.getProperties2(&props2);

    const size_t subgroup_size = subgroup_props.subgroupSize;
    const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;

    bool fp16_storage = false;
    bool fp16_compute = false;

    for (const auto& properties : ext_props) {
        if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
            fp16_storage = true;
        } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
            fp16_compute = true;
        }
    }

    const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
    bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;

    bool fp16 = !force_disable_f16 && fp16_storage && fp16_compute;

    vk::PhysicalDeviceFeatures device_features = physical_device.getFeatures();

    VkPhysicalDeviceFeatures2 device_features2;
    device_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    device_features2.pNext = nullptr;
    device_features2.features = (VkPhysicalDeviceFeatures)device_features;

    VkPhysicalDeviceVulkan11Features vk11_features;
    vk11_features.pNext = nullptr;
    vk11_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
    device_features2.pNext = &vk11_features;

    VkPhysicalDeviceVulkan12Features vk12_features;
    vk12_features.pNext = nullptr;
    vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
    vk11_features.pNext = &vk12_features;
    vkGetPhysicalDeviceFeatures2(physical_device, &device_features2);

    fp16 = fp16 && vk12_features.shaderFloat16;

    std::string device_name = props2.properties.deviceName.data();
    std::cerr << GGML_VK_NAME << idx << ": " << device_name << " (" << driver_props.driverName << ") | uma: " << uma << " | fp16: " << fp16 << " | warp size: " << subgroup_size << std::endl;

    if (props2.properties.deviceType == vk::PhysicalDeviceType::eCpu) {
        std::cerr << "ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want." << std::endl;
    }
}
static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions);
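
// One-time Vulkan instance setup: enables validation and portability
// extensions where available, then builds the list of visible devices,
// either from GGML_VK_VISIBLE_DEVICES or by enumerating all discrete GPUs
// and deduplicating physical devices that share a deviceUUID.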
void ggml_vk_instance_init() {
    if (vk_instance_initialized) {
        return;
    }
    VK_LOG_DEBUG("ggml_vk_instance_init()");

    vk_instance_initialized = true;

    vk::ApplicationInfo app_info{ "ggml-vulkan", 1, nullptr, 0, VK_API_VERSION };

    const std::vector<vk::ExtensionProperties> instance_extensions = vk::enumerateInstanceExtensionProperties();
    const bool validation_ext = ggml_vk_instance_validation_ext_available(instance_extensions);
#ifdef __APPLE__
    const bool portability_enumeration_ext = ggml_vk_instance_portability_enumeration_ext_available(instance_extensions);
#endif

    std::vector<const char*> layers;

    if (validation_ext) {
        layers.push_back("VK_LAYER_KHRONOS_validation");
    }
    std::vector<const char*> extensions;
    if (validation_ext) {
        extensions.push_back("VK_EXT_validation_features");
    }
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        extensions.push_back("VK_KHR_portability_enumeration");
    }
#endif
    vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags{}, &app_info, layers, extensions);
#ifdef __APPLE__
    if (portability_enumeration_ext) {
        instance_create_info.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
    }
#endif

    std::vector<vk::ValidationFeatureEnableEXT> features_enable;
    vk::ValidationFeaturesEXT validation_features;

    if (validation_ext) {
        features_enable = { vk::ValidationFeatureEnableEXT::eBestPractices };
        validation_features = {
            features_enable,
            {},
        };
        validation_features.setPNext(nullptr);
        instance_create_info.setPNext(&validation_features);
        std::cerr << "ggml_vulkan: Validation layers enabled" << std::endl;
    }
    vk_instance.instance = vk::createInstance(instance_create_info);

    size_t num_available_devices = vk_instance.instance.enumeratePhysicalDevices().size();

    // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan
    char * devices_env = getenv("GGML_VK_VISIBLE_DEVICES");
    if (devices_env != nullptr) {
        std::string devices(devices_env);
        std::replace(devices.begin(), devices.end(), ',', ' ');

        std::stringstream ss(devices);
        size_t tmp;
        while (ss >> tmp) {
            if (tmp >= num_available_devices) {
                std::cerr << "ggml_vulkan: Invalid device index " << tmp << " in GGML_VK_VISIBLE_DEVICES." << std::endl;
                throw std::runtime_error("Invalid Vulkan device index");
            }
            vk_instance.device_indices.push_back(tmp);
        }
    } else {
        std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

        // Make sure at least one device exists
        if (devices.empty()) {
            std::cerr << "ggml_vulkan: Error: No devices found." << std::endl;
            GGML_ABORT("fatal error");
        }

        // Default to using all dedicated GPUs
        for (size_t i = 0; i < devices.size(); i++) {
            vk::PhysicalDeviceProperties2 new_props;
            vk::PhysicalDeviceDriverProperties new_driver;
            vk::PhysicalDeviceIDProperties new_id;
            new_props.pNext = &new_driver;
            new_driver.pNext = &new_id;
            devices[i].getProperties2(&new_props);

            if (new_props.properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu) {
                // Check if there are two physical devices corresponding to the same GPU
                auto old_device = std::find_if(
                    vk_instance.device_indices.begin(),
                    vk_instance.device_indices.end(),
                    [&devices, &new_id](const size_t k){
                        vk::PhysicalDeviceProperties2 old_props;
                        vk::PhysicalDeviceIDProperties old_id;
                        old_props.pNext = &old_id;
                        devices[k].getProperties2(&old_props);
                        return std::equal(std::begin(old_id.deviceUUID), std::end(old_id.deviceUUID), std::begin(new_id.deviceUUID));
                    }
                );
                if (old_device == vk_instance.device_indices.end()) {
                    vk_instance.device_indices.push_back(i);
                } else {
                    // There can be two physical devices corresponding to the same GPU if there are two different drivers.
                    // This can cause errors when splitting layers across the devices, so keep only one of them.
                    VK_LOG_DEBUG("Device " << i << " and device " << *old_device << " have the same deviceUUID");

                    vk::PhysicalDeviceProperties2 old_props;
                    vk::PhysicalDeviceDriverProperties old_driver;
                    old_props.pNext = &old_driver;
                    devices[*old_device].getProperties2(&old_props);

                    std::map<vk::DriverId, int> driver_priorities {};
                    int old_priority = std::numeric_limits<int>::max();
                    int new_priority = std::numeric_limits<int>::max();

                    // Check https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkDriverId.html for the list of driver ids.
                    // Smaller number -> higher priority.
                    switch (old_props.properties.vendorID) {
                        case VK_VENDOR_ID_AMD:
                            driver_priorities[vk::DriverId::eMesaRadv] = 1;
                            driver_priorities[vk::DriverId::eAmdOpenSource] = 2;
                            driver_priorities[vk::DriverId::eAmdProprietary] = 3;
                            break;
                        case VK_VENDOR_ID_INTEL:
                            driver_priorities[vk::DriverId::eIntelOpenSourceMESA] = 1;
                            driver_priorities[vk::DriverId::eIntelProprietaryWindows] = 2;
                            break;
                        case VK_VENDOR_ID_NVIDIA:
                            driver_priorities[vk::DriverId::eNvidiaProprietary] = 1;
#if defined(VK_API_VERSION_1_3) && VK_HEADER_VERSION >= 235
                            driver_priorities[vk::DriverId::eMesaNvk] = 2;
#endif
                            break;
                    }

                    if (driver_priorities.count(old_driver.driverID)) {
                        old_priority = driver_priorities[old_driver.driverID];
                    }
                    if (driver_priorities.count(new_driver.driverID)) {
                        new_priority = driver_priorities[new_driver.driverID];
                    }

                    if (new_priority < old_priority) {
                        auto r = std::remove(vk_instance.device_indices.begin(), vk_instance.device_indices.end(), *old_device);
                        vk_instance.device_indices.erase(r, vk_instance.device_indices.end());
                        vk_instance.device_indices.push_back(i);

                        VK_LOG_DEBUG("Prioritize device " << i << " driver " << new_driver.driverName << " over device " << *old_device << " driver " << old_driver.driverName);
                    } else {
                        VK_LOG_DEBUG("Prioritize device " << *old_device << " driver " << old_driver.driverName << " over device " << i << " driver " << new_driver.driverName);
                    }
                }
            }
        }

        // If no dedicated GPUs found, fall back to GPU 0
        if (vk_instance.device_indices.empty()) {
            vk_instance.device_indices.push_back(0);
        }
    }

    std::cerr << "ggml_vulkan: Found " << vk_instance.device_indices.size() << " Vulkan devices:" << std::endl;

    for (size_t i = 0; i < vk_instance.device_indices.size(); i++) {
        ggml_vk_print_gpu_info(i);
    }
}
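
// Per-backend-context initialization: binds the context to its device and
// resets the semaphore/event indices and preallocation sizes.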
static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
    VK_LOG_DEBUG("ggml_vk_init(" << ctx->name << ", " << idx << ")");
    ggml_vk_instance_init();
    GGML_ASSERT(idx < vk_instance.device_indices.size());

    ctx->name = GGML_VK_NAME + std::to_string(idx);

    ctx->device = ggml_vk_get_device(idx);

    ctx->semaphore_idx = 0;
    ctx->event_idx = 0;

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;

    ctx->fence = ctx->device->device.createFence({});

#ifdef GGML_VULKAN_CHECK_RESULTS
    const char* skip_checks = getenv("GGML_VULKAN_SKIP_CHECKS");
    vk_skip_checks = (skip_checks == NULL ? 0 : atoi(skip_checks));
    const char* output_tensor = getenv("GGML_VULKAN_OUTPUT_TENSOR");
    vk_output_tensor = (output_tensor == NULL ? 0 : atoi(output_tensor));
#endif
}
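
// Returns the dequantization pipeline that converts the given type to fp16,
// or nullptr if the type is unsupported.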
static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type type) {
    VK_LOG_DEBUG("ggml_vk_get_to_fp16()");
    switch (type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant[type];
}
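
// Pipeline selection for matrix-matrix multiplication: direct f32/f16
// pipelines for float inputs, dequantizing pipelines for quantized src0
// with f32 src1, and nullptr for unsupported combinations.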
static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_pipeline(" << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ")");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_f32;
    }
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_f32_f16;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_f16_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_f16;
    }

    if (src1_type != GGML_TYPE_F32) {
        return nullptr;
    }

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_mat[src0_type];
}
static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec()");
    GGML_ASSERT(b_type == GGML_TYPE_F32 || b_type == GGML_TYPE_F16);

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type];
}
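
// Same selection logic as above, but for the indirect (mul_mat_id) variants
// used for expert selection in mixture-of-experts models.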
static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type) {
    VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_id_pipeline()");
    if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_id_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
        return ctx->device->pipeline_matmul_id_f16_f32;
    }
    if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
        return ctx->device->pipeline_matmul_id_f16;
    }

    GGML_ASSERT(src1_type == GGML_TYPE_F32);

    switch (src0_type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type];
}
static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) {
    VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec_id()");
    GGML_ASSERT(b_type == GGML_TYPE_F32);

    switch (a_type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ4_NL:
            break;
        default:
            return nullptr;
    }

    return ctx->device->pipeline_dequant_mul_mat_vec_id_f32[a_type];
}
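
// Simple buffer pool: reuses the smallest pooled buffer that fits the
// request; if none fits, frees the largest pooled buffer and allocates anew.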
static vk_buffer ggml_vk_pool_malloc(ggml_backend_vk_context * ctx, size_t size) {
    VK_LOG_DEBUG("ggml_vk_pool_malloc(" << size << ")");
    VK_LOG_MEMORY("ggml_vk_pool_malloc");

    int best_i = -1;
    size_t best_size = std::numeric_limits<size_t>::max(); // smallest unused buffer that fits our needs
    int worst_i = -1;
    size_t worst_size = 0; // largest unused buffer seen so far
    for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
        vk_buffer &b = ctx->buffer_pool[i];
        if (b != nullptr && b->size >= size && b->size < best_size) {
            best_i = i;
            best_size = b->size;
        }
        if (b != nullptr && b->size > worst_size) {
            worst_i = i;
            worst_size = b->size;
        }
    }
    if (best_i != -1) {
        // found the smallest buffer that fits our needs
        vk_buffer b = ctx->buffer_pool[best_i];
        ctx->buffer_pool[best_i].reset();
        return b;
    }
    if (worst_i != -1) {
        // no buffer fits our needs: free the largest one to save memory before allocating a new buffer
        vk_buffer& b = ctx->buffer_pool[worst_i];
        ggml_vk_destroy_buffer(b);
    }

    return ggml_vk_create_buffer_device(ctx->device, size);
}
static void ggml_vk_pool_free(ggml_backend_vk_context * ctx, vk_buffer& buffer) {
    VK_LOG_DEBUG("ggml_vk_pool_free(" << buffer->size << ")");
    for (int i = 0; i < MAX_VK_BUFFERS; ++i) {
        vk_buffer& b = ctx->buffer_pool[i];
        if (b == nullptr) {
            b = buffer;
            return;
        }
    }
    std::cerr << "ggml_vulkan: WARNING: vk buffer pool full, increase MAX_VK_BUFFERS" << std::endl;
    ggml_vk_destroy_buffer(buffer);
}
// Returns an available temporary buffer; it may only be used temporarily, as it will be reused.
static vk_buffer ggml_vk_create_buffer_temp(ggml_backend_vk_context * ctx, size_t size) {
    // Try to find an existing temp buffer with enough capacity
    for (auto& buffer : ctx->gc.temp_buffers) {
        if (buffer->size >= size) {
            return buffer;
        }
    }

    VK_LOG_MEMORY("ggml_vk_create_buffer_temp(" << size << ")");

    // Otherwise create a new buffer
    vk_buffer buf = ggml_vk_pool_malloc(ctx, size);
    ctx->gc.temp_buffers.push_back(buf);

    return buf;
}
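
// Pinned host memory: allocates a host-visible, host-coherent (preferably
// cached) buffer and tracks it in device->pinned_memory so transfers can
// use it directly as a staging area.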
static void * ggml_vk_host_malloc(vk_device& device, size_t size) {
    VK_LOG_MEMORY("ggml_vk_host_malloc(" << size << ")");
    vk_buffer buf = ggml_vk_create_buffer(device, size,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);

    if (!(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible)) {
        fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory\n",
            size/1024.0/1024.0);
        device->device.freeMemory(buf->device_memory);
        device->device.destroyBuffer(buf->buffer);
        return nullptr;
    }

    device->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf));

    return buf->ptr;
}
static void ggml_vk_host_free(vk_device& device, void* ptr) {
    if (ptr == nullptr) {
        return;
    }
    VK_LOG_MEMORY("ggml_vk_host_free(" << ptr << ")");
    vk_buffer buf;
    size_t index;
    for (size_t i = 0; i < device->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(device->pinned_memory[i]);
            index = i;
            break;
        }
    }
    if (buf == nullptr) {
        fprintf(stderr, "WARNING: failed to free pinned memory: memory not in map\n");
        return;
    }

    ggml_vk_destroy_buffer(buf);

    device->pinned_memory.erase(device->pinned_memory.begin() + index);
}
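
// Looks up whether ptr falls inside a tracked pinned allocation; on success,
// returns the backing vk_buffer and the offset of ptr within it.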
static void ggml_vk_host_get(vk_device& device, const void * ptr, vk_buffer& buf, size_t& buf_offset) {
    buf = nullptr;
    buf_offset = 0;
    for (size_t i = 0; i < device->pinned_memory.size(); i++) {
        const uint8_t* addr = (const uint8_t*) std::get<0>(device->pinned_memory[i]);
        const uint8_t* endr = addr + std::get<1>(device->pinned_memory[i]);
        if (ptr >= addr && ptr < endr) {
            buf = std::get<2>(device->pinned_memory[i]);
            buf_offset = ((const uint8_t *)ptr) - addr;
            break;
        }
    }
}
static vk_submission ggml_vk_begin_submission(vk_device& device, vk_queue& q, bool one_time = true) {
    vk_submission s;
    s.buffer = ggml_vk_create_cmd_buffer(device, q);
    if (one_time) {
        s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
    } else {
        s.buffer.begin({ vk::CommandBufferUsageFlags{} });
    }

    return s;
}
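
// Records a compute dispatch into the current command buffer: updates the
// descriptor set with the given buffer bindings, pushes the push constants,
// binds the pipeline, and dispatches CEIL_DIV(elements, wg_denoms) workgroups.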
static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context& subctx, vk_pipeline& pipeline, std::initializer_list<vk::DescriptorBufferInfo> const& descriptor_buffer_infos, size_t push_constant_size, const void* push_constants, std::array<uint32_t, 3> elements) {
    const uint32_t wg0 = CEIL_DIV(elements[0], pipeline->wg_denoms[0]);
    const uint32_t wg1 = CEIL_DIV(elements[1], pipeline->wg_denoms[1]);
    const uint32_t wg2 = CEIL_DIV(elements[2], pipeline->wg_denoms[2]);
    VK_LOG_DEBUG("ggml_vk_dispatch_pipeline(" << pipeline->name << ", {";
    for (auto& buffer : descriptor_buffer_infos) {
        std::cerr << "(" << buffer.buffer << ", " << buffer.offset << ", " << buffer.range << "), ";
    }
    std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))");
    GGML_ASSERT(pipeline->descriptor_set_idx < pipeline->descriptor_sets.size());
    GGML_ASSERT(descriptor_buffer_infos.size() == pipeline->parameter_count);

    vk::DescriptorSet& descriptor_set = pipeline->descriptor_sets[pipeline->descriptor_set_idx++];
    vk::WriteDescriptorSet write_descriptor_set{ descriptor_set, 0, 0, pipeline->parameter_count, vk::DescriptorType::eStorageBuffer, nullptr, descriptor_buffer_infos.begin() };
    ctx->device->device.updateDescriptorSets({ write_descriptor_set }, {});

    subctx->s->buffer.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size, push_constants);
    subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline);
    subctx->s->buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute,
                                pipeline->layout,
                                0,
                                { descriptor_set },
                                {});
    subctx->s->buffer.dispatch(wg0, wg1, wg2);
}
static void ggml_vk_end_submission(vk_submission& s, std::vector<vk_semaphore> wait_semaphores, std::vector<vk_semaphore> signal_semaphores) {
    s.buffer.end();

    s.wait_semaphores = std::move(wait_semaphores);
    s.signal_semaphores = std::move(signal_semaphores);
}

static void ggml_vk_ctx_end(vk_context& ctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_end(" << ctx << ", " << ctx->seqs.size() << ")");
    if (ctx->s == nullptr) {
        return;
    }

    ctx->s->buffer.end();
    ctx->s = nullptr;
}
static void ggml_vk_ctx_begin(vk_device& device, vk_context& subctx) {
    VK_LOG_DEBUG("ggml_vk_ctx_begin(" << device->name << ")");
    if (subctx->s != nullptr) {
        ggml_vk_ctx_end(subctx);
    }

    subctx->seqs.push_back({ ggml_vk_begin_submission(device, *subctx->q) });
    subctx->s = subctx->seqs[subctx->seqs.size() - 1].data();
}

static size_t ggml_vk_align_size(size_t width, size_t align) {
    VK_LOG_DEBUG("ggml_vk_align_size(" << width << ", " << align << ")");
    return CEIL_DIV(width, align) * align;
}

static void deferred_memcpy(void * dst, const void * src, size_t size, std::vector<vk_staging_memcpy>* memcpys = nullptr) {
    if (memcpys == nullptr) {
        memcpy(dst, src, size);
    } else {
        memcpys->emplace_back(dst, src, size);
    }
}
static void ggml_vk_ensure_sync_staging_buffer(vk_device& device, size_t size) {
    if (device->sync_staging == nullptr || device->sync_staging->size < size) {
        VK_LOG_MEMORY("ggml_vk_ensure_sync_staging_buffer(" << size << ")");
        ggml_vk_destroy_buffer(device->sync_staging);
        device->sync_staging = ggml_vk_create_buffer_check(device, size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}
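
// Asynchronous upload of a non-contiguous tensor. If the source is pinned host
// memory it is used directly as the staging area and the transfer is recorded
// as a list of vk::BufferCopy regions, coalescing the longest contiguous runs
// per dimension; otherwise the data goes through the device's sync staging
// buffer and the host-side copies are deferred into subctx->in_memcpys.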
static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_context& subctx, vk_buffer& dst, size_t offset, const ggml_tensor * tensor, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_nc_async(" << tensor << ")");
    GGML_ASSERT(!ggml_is_contiguous(tensor));
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_nc_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ABORT("fatal error");
    }
    // Check if src is pinned memory
    vk_buffer buf;
    size_t buf_offset;
    ggml_vk_host_get(ctx->device, tensor->data, buf, buf_offset);

    const uint64_t ne0 = tensor->ne[0];
    const uint64_t ne1 = tensor->ne[1];
    const uint64_t ne2 = tensor->ne[2];
    const uint64_t ne3 = tensor->ne[3];
    const uint64_t nb0 = tensor->nb[0];
    const uint64_t nb1 = tensor->nb[1];
    const uint64_t nb2 = tensor->nb[2];
    const uint64_t nb3 = tensor->nb[3];
    const ggml_type type = tensor->type;
    const uint64_t ts = ggml_type_size(type);
    const uint64_t bs = ggml_blck_size(type);

    const uint64_t dstnb0 = ts;
    const uint64_t dstnb1 = dstnb0*(ne0/bs);
    const uint64_t dstnb2 = dstnb1*ne1;
    const uint64_t dstnb3 = dstnb2*ne2;

    const uint64_t ne = ggml_nelements(tensor);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices;

        for (uint64_t i3 = 0; i3 < ne3; i3++) {
            for (uint64_t i2 = 0; i2 < ne2; i2++) {
                // Find longest contiguous slice
                if (ne1*nb1 == dstnb2) {
                    slices.push_back({ buf_offset + i3*nb3 + i2*nb2, offset + i3*dstnb3 + i2*dstnb2, dstnb2 });
                } else {
                    for (uint64_t i1 = 0; i1 < ne1; i1++) {
                        if (ne0*nb0/bs == dstnb1) {
                            slices.push_back({ buf_offset + i3*nb3 + i2*nb2 + i1*nb1, offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, dstnb1 });
                        } else {
                            const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                            const uint64_t d_off = offset + i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                            for (uint64_t i0 = 0; i0 < ne0; i0++) {
                                slices.push_back({ s_off + i0*nb0, d_off + i0*dstnb0, dstnb0 });
                            }
                        }
                    }
                }
            }
        }

        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }

    if (!sync_staging) {
        GGML_ABORT("Asynchronous write to non-pinned memory not supported");
    }

    // Staging buffer required
    vk_buffer& staging = ctx->device->sync_staging;
    const uint64_t copy_size = ts*ne/bs;
    ggml_vk_ensure_sync_staging_buffer(ctx->device, copy_size);

    VkBufferCopy buf_copy{ 0, offset, copy_size };

    ggml_vk_sync_buffers(subctx);
    vkCmdCopyBuffer(subctx->s->buffer, staging->buffer, dst->buffer, 1, &buf_copy);

    for (uint64_t i3 = 0; i3 < ne3; i3++) {
        for (uint64_t i2 = 0; i2 < ne2; i2++) {
            // Find longest contiguous slice
            if (ne1*nb1 == dstnb2) {
                deferred_memcpy((uint8_t *)staging->ptr + i3*dstnb3 + i2*dstnb2, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2, dstnb2, &subctx->in_memcpys);
            } else {
                for (uint64_t i1 = 0; i1 < ne1; i1++) {
                    if (ne0*nb0/bs == dstnb1) {
                        deferred_memcpy((uint8_t *)staging->ptr + i3*dstnb3 + i2*dstnb2 + i1*dstnb1, (const uint8_t *) tensor->data + buf_offset + i3*nb3 + i2*nb2 + i1*nb1, dstnb1, &subctx->in_memcpys);
                    } else {
                        const uint64_t s_off = buf_offset + i3*nb3 + i2*nb2 + i1*nb1;
                        const uint64_t d_off = i3*dstnb3 + i2*dstnb2 + i1*dstnb1;
                        for (uint64_t i0 = 0; i0 < ne0; i0++) {
                            deferred_memcpy((uint8_t *)staging->ptr + d_off + i0*dstnb0, (const uint8_t *) tensor->data + s_off + i0*nb0, dstnb0, &subctx->in_memcpys);
                        }
                    }
                }
            }
        }
    }
}
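
// Asynchronous 2D upload: `height` rows of `width` bytes with source stride
// `spitch`. Tightly packed rows (width == spitch) collapse into a single copy
// region; otherwise one region per row is recorded.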
static void ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d_async(" << width << ", " << height << ")");
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        std::cerr << "ggml_vulkan: buffer_write_async dst buffer is host_visible. Use synchronous write." << std::endl;
        GGML_ABORT("fatal error");
    }
    // Check if src is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset;
    ggml_vk_host_get(dst->device, src, buf, buf_offset);

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        std::vector<vk::BufferCopy> slices(1);
        if (width == spitch) {
            // Only do single write if stride is equal
            slices[0].srcOffset = buf_offset;
            slices[0].dstOffset = offset;
            slices[0].size = width * height;
        } else {
            slices.resize(height);
            for (size_t i = 0; i < height; i++) {
                slices[i].srcOffset = buf_offset + i * spitch;
                slices[i].dstOffset = offset + i * width;
                slices[i].size = width;
            }
        }

        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices);
        return;
    }
    VK_LOG_DEBUG("STAGING");

    if (!sync_staging) {
        GGML_ABORT("Asynchronous write to non-pinned memory not supported");
    }

    // Staging buffer required
    const size_t copy_size = width*height;
    ggml_vk_ensure_sync_staging_buffer(dst->device, copy_size);

    vk_buffer& staging_buffer = dst->device->sync_staging;

    VkBufferCopy buf_copy = {
        0,
        offset,
        copy_size};

    ggml_vk_sync_buffers(subctx);
    vkCmdCopyBuffer(subctx->s->buffer, staging_buffer->buffer, dst->buffer, 1, &buf_copy);

    if (width == spitch) {
        deferred_memcpy((uint8_t *)staging_buffer->ptr, src, width * height, &subctx->in_memcpys);
    } else {
        for (size_t i = 0; i < height; i++) {
            deferred_memcpy((uint8_t *)staging_buffer->ptr + i * width, (const uint8_t *) src + i * spitch, width, &subctx->in_memcpys);
        }
    }
}
static void ggml_vk_buffer_write_async(vk_context subctx, vk_buffer& dst, size_t offset, const void * src, size_t size, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_async(" << size << ")");
    return ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, size, size, 1, sync_staging);
}

static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void * src, size_t spitch, size_t width, size_t height) {
    VK_LOG_DEBUG("ggml_vk_buffer_write_2d(" << width << ", " << height << ")");
    // Buffer is already mapped
    if (dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        GGML_ASSERT(dst->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);

        for (size_t i = 0; i < height; i++) {
            memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width);
        }
    } else {
        vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue);
        ggml_vk_ctx_begin(dst->device, subctx);
        ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, spitch, width, height, true);
        ggml_vk_ctx_end(subctx);

        for (auto& cpy : subctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }

        ggml_vk_submit(subctx, dst->device->fence);
        VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_buffer_write_2d waitForFences");
        dst->device->device.resetFences({ dst->device->fence });
    }
}

static void ggml_vk_buffer_write(vk_buffer& dst, size_t offset, const void * src, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_write(" << size << ")");
    ggml_vk_buffer_write_2d(dst, offset, src, 0, size, 1);
}
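
// Download counterpart of the 2D write: copies straight into pinned memory when
// the destination is a registered host buffer, otherwise bounces through the
// sync staging buffer and defers the final host memcpy into subctx->out_memcpys.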
static void ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, bool sync_staging = false) {
    VK_LOG_DEBUG("ggml_vk_buffer_read_2d_async(offset=" << offset << ", width=" << width << ", height=" << height << ")");
    GGML_ASSERT(width > 0);
    GGML_ASSERT(height > 0);
    GGML_ASSERT(src != nullptr);

    // TODO: staging_offset is not used

    // Check if dst is pinned memory
    vk_buffer buf = nullptr;
    size_t buf_offset;
    ggml_vk_host_get(src->device, dst, buf, buf_offset);

    std::vector<vk::BufferCopy> slices(1);
    if (width == spitch && width == dpitch) {
        // Only do single write if stride is equal
        slices[0].srcOffset = offset;
        slices[0].dstOffset = buf_offset;
        slices[0].size = width * height;
    } else {
        slices.resize(height);
        for (size_t i = 0; i < height; i++) {
            slices[i].srcOffset = offset + i * spitch;
            slices[i].dstOffset = buf_offset + i * dpitch;
            slices[i].size = width;
        }
    }

    if (buf != nullptr) {
        // Memory is pinned, use as staging buffer
        ggml_vk_sync_buffers(subctx);
        subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices);
        return;
    }
    VK_LOG_DEBUG("STAGING");

    if (!sync_staging) {
        GGML_ABORT("Asynchronous read from non-pinned memory not supported");
    }

    // Fall back to staging buffer
    const size_t copy_size = dpitch * height;
    ggml_vk_ensure_sync_staging_buffer(src->device, copy_size);

    vk_buffer& staging_buffer = src->device->sync_staging;

    ggml_vk_sync_buffers(subctx);
    subctx->s->buffer.copyBuffer(src->buffer, staging_buffer->buffer, slices);

    deferred_memcpy(dst, staging_buffer->ptr, copy_size, &subctx->out_memcpys);
}
static void ggml_vk_buffer_read_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t size, bool sync_staging = false) {
    return ggml_vk_buffer_read_2d_async(subctx, src, offset, dst, size, size, size, 1, sync_staging);
}

static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_read(" << src->buffer << ", " << offset << ", " << size << ")");

    // If the device is not a UMA device, the memory may still be host-accessible through ReBAR.
    // Writing through PCIe is sufficiently fast, but reading back over PCIe is slower than the
    // HW device-to-host copy path, so only read directly on UMA devices.
    if (src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible && src->device->uma) {
        GGML_ASSERT(src->memory_property_flags & vk::MemoryPropertyFlagBits::eHostCoherent);

        memcpy(dst, (uint8_t *) src->ptr + offset, size);
    } else {
        vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue);
        ggml_vk_ctx_begin(src->device, subctx);
        ggml_vk_buffer_read_async(subctx, src, offset, dst, size, true);
        ggml_vk_ctx_end(subctx);

        ggml_vk_submit(subctx, src->device->fence);
        VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_read waitForFences");
        src->device->device.resetFences({ src->device->fence });

        for (auto& cpy : subctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
    }
}

static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_copy_async(" << size << ")");
    // Make sure both buffers are on same device
    GGML_ASSERT(src->device == dst->device);

    VkBufferCopy bc{ src_offset, dst_offset, size };

    vkCmdCopyBuffer(ctx->s->buffer, src->buffer, dst->buffer, 1, &bc);
}
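
// Synchronous buffer-to-buffer copy. On a single device this records one
// transfer and waits on the device fence; across devices the data is routed
// through both devices' staging buffers with a host-side memcpy in between.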
static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
    if (src->device == dst->device) {
        VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")");
        // Copy within the device
        vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue);
        ggml_vk_ctx_begin(src->device, subctx);
        ggml_vk_buffer_copy_async(subctx, dst, dst_offset, src, src_offset, size);
        ggml_vk_ctx_end(subctx);
        ggml_vk_submit(subctx, src->device->fence);
        VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_copy waitForFences");
        src->device->device.resetFences({ src->device->fence });
    } else {
        VK_LOG_DEBUG("ggml_vk_buffer_copy(MULTI_DEVICE, " << size << ")");
        // Copy device to device
        ggml_vk_ensure_sync_staging_buffer(src->device, size);
        ggml_vk_ensure_sync_staging_buffer(dst->device, size);

        // Copy to src staging buffer
        ggml_vk_buffer_copy(src->device->sync_staging, 0, src, src_offset, size);
        // memcpy to dst staging buffer
        memcpy(dst->device->sync_staging->ptr, src->device->sync_staging->ptr, size);
        // Copy to dst buffer
        ggml_vk_buffer_copy(dst, dst_offset, dst->device->sync_staging, 0, size);
    }
}

static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
    VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")");

    vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue);
    ggml_vk_ctx_begin(dst->device, subctx);
    subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
    ggml_vk_ctx_end(subctx);

    ggml_vk_submit(subctx, dst->device->fence);
    VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_memset waitForFences");
    dst->device->device.resetFences({ dst->device->fence });
}
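
// Heuristic for splitting the K dimension across multiple workgroup slices.
// The tuned branch is commented out, so split_k is currently always 1.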
static uint32_t ggml_vk_guess_split_k(int m, int n, int k) {
    VK_LOG_DEBUG("ggml_vk_guess_split_k(" << m << ", " << n << ", " << k << ")");
    // if (k > 128 && (m < 128 || n < 128) && m > 2 && n > 2) {
    //     return 4;
    // }

    return 1;

    GGML_UNUSED(m); GGML_UNUSED(n); GGML_UNUSED(k);
}
static vk_pipeline ggml_vk_guess_matmul_pipeline_amd(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
    if (m <= 32 || n <= 32) {
        return aligned ? mmp->a_s : mmp->s;
    }
    return aligned ? mmp->a_m : mmp->m;

    GGML_UNUSED(ctx);
}

static vk_pipeline ggml_vk_guess_matmul_pipeline_apple(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, bool aligned) {
    return aligned ? mmp->a_m : mmp->m;

    GGML_UNUSED(ctx);
}

static vk_pipeline ggml_vk_guess_matmul_pipeline_intel(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, bool aligned) {
    return aligned ? mmp->a_s : mmp->s;

    GGML_UNUSED(ctx);
}

static vk_pipeline ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ")");
    switch (ctx->device->vendor_id) {
    case VK_VENDOR_ID_AMD:
        return ggml_vk_guess_matmul_pipeline_amd(ctx, mmp, m, n, aligned);
    case VK_VENDOR_ID_APPLE:
        return ggml_vk_guess_matmul_pipeline_apple(ctx, mmp, aligned);
    case VK_VENDOR_ID_INTEL:
        return ggml_vk_guess_matmul_pipeline_intel(ctx, mmp, aligned);
    default:
        break;
    }

    if (m <= 32 || n <= 32) {
        return aligned ? mmp->a_s : mmp->s;
    }
    if (m <= 64 || n <= 64) {
        return aligned ? mmp->a_m : mmp->m;
    }
    return aligned ? mmp->a_l : mmp->l;
}

static uint32_t ggml_vk_guess_matmul_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n) {
    VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline_align(" << m << ", " << n << ")");
    return ggml_vk_guess_matmul_pipeline(ctx, mmp, m, n, true)->align;
}
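
// Records a matrix multiplication. With split_k == 1 this is a single dispatch.
// With split_k > 1 each slice computes partial sums over CEIL_DIV(k, split_k)
// columns into split_k_buffer (m is rounded up to whole workgroups so every
// slice sees the same grid), and pipeline_matmul_split_k_reduce then sums the
// split_k partial results into d.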
static void ggml_vk_matmul(
        ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
        vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& split_k_buffer,
        uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
        uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
        uint32_t split_k, uint32_t batch, uint32_t ne02, uint32_t ne12, uint32_t broadcast2, uint32_t broadcast3) {
    VK_LOG_DEBUG("ggml_vk_matmul(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), split_k: (" << (split_k_buffer.buffer != nullptr ? split_k_buffer.buffer->buffer : VK_NULL_HANDLE) << ", " << split_k_buffer.offset << ", " << split_k_buffer.size << "), m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", split_k: " << split_k << ", batch: " << batch << ", ne02: " << ne02 << ", ne12: " << ne12 << ", broadcast2: " << broadcast2 << ", broadcast3: " << broadcast3 << ")");
    ggml_vk_sync_buffers(subctx);
    if (split_k == 1) {
        const vk_mat_mat_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, k, ne02, ne12, broadcast2, broadcast3 };
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d }, sizeof(vk_mat_mat_push_constants), &pc, { m, n, batch });
        return;
    }

    GGML_ASSERT(batch_stride_d == m * n);

    const vk_mat_mat_push_constants pc1 = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d, CEIL_DIV(k, split_k), ne02, ne12, broadcast2, broadcast3 };
    // Make sure enough workgroups get assigned for split k to work
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, split_k_buffer }, sizeof(vk_mat_mat_push_constants), &pc1, { (CEIL_DIV(m, pipeline->wg_denoms[0]) * pipeline->wg_denoms[0]) * split_k, n, batch });
    ggml_vk_sync_buffers(subctx);
    const std::array<uint32_t, 2> pc2 = { (uint32_t)(m * n * batch), split_k };
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_matmul_split_k_reduce, { split_k_buffer, d }, pc2.size() * sizeof(uint32_t), pc2.data(), { m * n * batch, 1, 1 });
}
static void ggml_vk_matmul_id(
        ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
        vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& ids,
        uint32_t m, uint32_t n, uint32_t k, uint32_t stride_a, uint32_t stride_b, uint32_t stride_d,
        uint32_t batch_stride_a, uint32_t batch_stride_b, uint32_t batch_stride_d,
        uint32_t n_as, uint32_t nei0, uint32_t nei1, uint32_t nbi1, uint32_t ne11) {
    VK_LOG_DEBUG("ggml_vk_matmul_id(a: (" << a.buffer->buffer << ", " << a.offset << ", " << a.size << "), b: (" << b.buffer->buffer << ", " << b.offset << ", " << b.size << "), d: (" << d.buffer->buffer << ", " << d.offset << ", " << d.size << "), ids: (" << ids.buffer->buffer << ", " << ids.offset << ", " << ids.size << "), " <<
        "m: " << m << ", n: " << n << ", k: " << k << ", stride_a: " << stride_a << ", stride_b: " << stride_b << ", stride_d: " << stride_d << ", " <<
        "batch_stride_a: " << batch_stride_a << ", batch_stride_b: " << batch_stride_b << ", batch_stride_d: " << batch_stride_d << ", " <<
        "n_as: " << n_as << ", nei0: " << nei0 << ", nei1: " << nei1 << ", nbi1: " << nbi1 << ", ne11: " << ne11 << ")");
    ggml_vk_sync_buffers(subctx);
    const vk_mat_mat_id_push_constants pc = { m, n, k, stride_a, stride_b, stride_d, batch_stride_a, batch_stride_b, batch_stride_d,
                                              nei0, nei1, nbi1, ne11 };
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { a, b, d, ids }, sizeof(vk_mat_mat_id_push_constants), &pc, { m, nei1, n_as });
}
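
// True if dims 0 and 1 are laid out contiguously. Note that nb[2] itself is not
// constrained, only nb[3] relative to it, so views that are merely strided in
// dim 2 still qualify.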
static bool ggml_vk_dim01_contiguous(const ggml_tensor * tensor) {
    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}
static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, ggml_type from, ggml_type to) {
    if (from == GGML_TYPE_F32 && to == GGML_TYPE_F32) {
        return ctx->device->pipeline_cpy_f32_f32;
    }
    if (from == GGML_TYPE_F32 && to == GGML_TYPE_F16) {
        return ctx->device->pipeline_cpy_f32_f16;
    }
    if (from == GGML_TYPE_F16 && to == GGML_TYPE_F16) {
        return ctx->device->pipeline_cpy_f16_f16;
    }

    std::cerr << "Missing CPY op for types: " << ggml_type_name(from) << " " << ggml_type_name(to) << std::endl;
    GGML_ABORT("fatal error");
}

static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline pipeline, const ggml_tensor * tensor, vk_subbuffer&& in, vk_subbuffer&& out) {
    VK_LOG_DEBUG("ggml_vk_cpy_to_contiguous((" << tensor << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << "), ";
    std::cerr << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ")");
    const int tensor_type_size = ggml_type_size(tensor->type);

    const uint32_t ne = ggml_nelements(tensor);

    const vk_op_unary_push_constants pc = {
        (uint32_t)ne,
        (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], (uint32_t)tensor->nb[0] / tensor_type_size, (uint32_t)tensor->nb[1] / tensor_type_size, (uint32_t)tensor->nb[2] / tensor_type_size, (uint32_t)tensor->nb[3] / tensor_type_size,
        (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], 1, (uint32_t)tensor->ne[0], (uint32_t)(tensor->ne[0] * tensor->ne[1]), (uint32_t)(tensor->ne[0] * tensor->ne[1] * tensor->ne[2]),
        0,
        0.0f, 0.0f,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, sizeof(vk_op_unary_push_constants), &pc, { ne, 1, 1 });
}
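
// General matrix-matrix multiplication. Inputs that the selected pipeline
// cannot consume directly are first converted (dequantized and/or made
// contiguous as F16) into the preallocated d_X/d_Y buffers, then the guessed
// matmul pipeline runs, optionally with split-k and a reduce pass. When dryrun
// is set, only preallocation sizes and descriptor-set counts are updated.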
static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];

    const uint64_t r2 = ne12 / ne02;
    const uint64_t r3 = ne13 / ne03;

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;

    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type);

    const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;

    if (mmp == nullptr) {
        // Fall back to dequant + f16 mulmat
        mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, GGML_TYPE_F16, y_f32_kernel ? GGML_TYPE_F32 : GGML_TYPE_F16);
    }

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const int x_ne = ne01 * ne00;
    const int y_ne = ne11 * ne10;
    const int d_ne = ne11 * ne01;

    const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, ne11));
    const bool aligned = ne10 == kpad && ne01 > 8 && ne11 > 8;

    const uint32_t split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);

    vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned);

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    const uint64_t y_sz = y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT

    if (dryrun) {
        const uint64_t x_sz_upd = x_sz * ne02 * ne03;
        const uint64_t y_sz_upd = y_sz * ne12 * ne13;
        const uint64_t split_k_size = split_k > 1 ? d_sz * ne12 * ne13 * 4 : 0;
        if (
                (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
                (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size) ||
                (split_k > 1 && split_k_size > ctx->device->max_memory_allocation_size)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
            ctx->prealloc_size_x = x_sz_upd;
        }
        if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
            ctx->prealloc_size_y = y_sz_upd;
        }
        if (split_k > 1 && ctx->prealloc_size_split_k < split_k_size) {
            ctx->prealloc_size_split_k = split_k_size;
        }

        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
        }
        if (split_k > 1) {
            ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, 1);
        }
        return;
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    GGML_ASSERT(d_D->size >= d_buf_offset + d_sz * ne02 * ne03);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = src0_buf_ctx->dev_buffer;
        qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
    }
    if (y_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    // compute
    ggml_vk_matmul(
        ctx, subctx, pipeline,
        { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 },
        { d_D, d_buf_offset, d_sz * ne12 * ne13 }, { ctx->prealloc_split_k, 0, d_sz * ne12 * ne13 * split_k },
        ne01, ne11, ne10,
        ne10, ne10, ne01, stride_batch_x, stride_batch_y, ne20*ne21,
        split_k, ne12*ne13, ne02, ne12, r2, r3
    ); // NOLINT
}
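
// Matrix-vector multiplication (ne11 == 1) using the fused
// dequantize+mul_mat_vec kernels: quantized rows of src0 are dequantized
// inside the kernel, so only non-contiguous inputs need an explicit
// conversion pass first.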
static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << (dryrun ? "dryrun" : "") << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    const uint64_t r2 = ne12 / ne02;
    const uint64_t r3 = ne13 / ne03;

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne11 * ne01;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type);
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    if (dryrun) {
        const uint64_t x_sz_upd = x_sz * ne02 * ne03;
        const uint64_t y_sz_upd = y_sz * ne12 * ne13;
        if (
                (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
                (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
            ctx->prealloc_size_x = x_sz_upd;
        }
        if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
            ctx->prealloc_size_y = y_sz_upd;
        }

        // Request descriptor sets
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
        }
        ggml_pipeline_request_descriptor_sets(ctx->device, dmmv, 1);
        return;
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = src0_buf_ctx->dev_buffer;
        qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    if (x_non_contig) {
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_x = ne00*ne01;
    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
        stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
    }

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];

    uint32_t groups_x = ne01;
    uint32_t groups_z = 1;

    if (ne01 > max_groups_x) {
        groups_z = 64;
        groups_x /= groups_z;
    }

    // compute
    const vk_mat_vec_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        stride_batch_x, stride_batch_y, (uint32_t)(ne20*ne21),
        (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
        { vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 }, vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 }, vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23} },
        sizeof(vk_mat_vec_push_constants), &pc, { groups_x, (uint32_t)(ne12 * ne13), groups_z });
}
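
// Specialized f16 x f32 matrix-vector product for permuted tensors (the p021
// layout), dispatched as one workgroup per (row, channel) pair.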
static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_p021_f16_f32(" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
    GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
    GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // NOLINT
    GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // NOLINT
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;

    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;

    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src1_uma = d_Qy != nullptr;
    }

    const uint64_t x_ne = ne00 * ne01 * ne02;
    const uint64_t y_ne = ne10 * ne11 * ne12;
    const uint64_t d_ne = ne01 * ne11 * ne12;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t d_sz = sizeof(float) * d_ne;

    if (dryrun) {
        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, 1);
        return;
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
    const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
    GGML_ASSERT(d_Qx != nullptr);
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }

    const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;

    const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;

    // compute
    const std::array<uint32_t, 6> pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
}
static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_nc_f16_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
    GGML_ASSERT(!ggml_is_transposed(src0));
    GGML_ASSERT(!ggml_is_transposed(src1));
    GGML_ASSERT(!ggml_is_permuted(src0));
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    // const uint64_t ne03 = src0->ne[3];

    const uint64_t nb01 = src0->nb[1];
    const uint64_t nb02 = src0->nb[2];

    // const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    // const uint64_t ne13 = src1->ne[3];

    GGML_ASSERT(ne11 == 1);

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;

    vk_buffer d_Qy = nullptr;
    size_t qy_buf_offset = 0;

    bool src1_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        src1_uma = d_Qy != nullptr;
    }

    const uint64_t d_ne = ne01 * ne11 * ne12;

    const uint32_t row_stride_x = nb01 / sizeof(ggml_fp16_t);
    const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);

    const uint64_t qx_sz = ggml_nbytes(src0);
    const uint64_t qy_sz = ggml_nbytes(src1);
    const uint64_t d_sz = sizeof(float) * d_ne;

    if (dryrun) {
        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1);
        return;
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
    const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
    GGML_ASSERT(d_Qx != nullptr);
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }

    const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;

    const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;

    // compute
    const std::array<uint32_t, 7> pc = { (uint32_t)ne00, (uint32_t)ne01, row_stride_x, channel_stride_x, (uint32_t)(ne12 / ne02), (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32,
        { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 7 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 });
}
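
// Top-level mul_mat dispatch: permuted f16 sources with a single dst row take
// the p021 kernel, non-contiguous f16 sources the nc kernel, other single-row
// results the generic matrix-vector path, and everything else the full
// matrix-matrix path.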
static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat(" << src0 << ", " << src1 << ", " << dst << ")");
    if (src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && dst->ne[1] == 1) {
        ggml_vk_mul_mat_vec_p021_f16_f32(ctx, subctx, src0, src1, dst, dryrun);
    } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && dst->ne[1] == 1) {
        ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, src0, src1, dst, dryrun);
    } else if (dst->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
        ggml_vk_mul_mat_vec_q_f16(ctx, subctx, src0, src1, dst, dryrun);
    } else {
        ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst, dryrun);
    }
}
static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3] << "),)");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];
    GGML_ASSERT(nei0 * nei1 <= 3072);

    const uint32_t nbi1 = ids->nb[1];
    const uint32_t nbi2 = ids->nb[2];

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    const uint64_t n_as = ne02;

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
    ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;

    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type);

    const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;

    if (mmp == nullptr) {
        GGML_ABORT("fatal error");
    }

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne21 * ne20;

    const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, nei1));
    const bool aligned = ne10 == kpad && ne01 > 8 && nei1 > 8;

    vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, nei1, aligned);

    const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
    const uint64_t y_sz = y_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;

    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_0 = ggml_vk_get_to_fp16(ctx, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, GGML_TYPE_F16);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT

    if (dryrun) {
        const uint64_t x_sz_upd = x_sz * ne02 * ne03;
        const uint64_t y_sz_upd = y_sz * ne12 * ne13;
        if (
                (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
                (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
            ctx->prealloc_size_x = x_sz_upd;
        }
        if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
            ctx->prealloc_size_y = y_sz_upd;
        }

        // Request descriptor sets
        ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
        }
        return;
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = src0_buf_ctx->dev_buffer;
        qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = ids_buf_ctx->dev_buffer;
        ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
        GGML_ASSERT(d_X->size >= x_sz * ne02 * ne03);
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
        GGML_ASSERT(d_Y->size >= y_sz * ne02 * ne03);
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    if (x_non_contig) {
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    } else if (qx_needs_dequant) {
        const std::vector<uint32_t> pc = { (uint32_t)ne01, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)(ggml_nelements(src0)) };
        ggml_vk_sync_buffers(subctx);
  3176. ggml_vk_dispatch_pipeline(ctx, subctx, to_fp16_vk_0,
  3177. { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz * ne02 * ne03 }, vk_subbuffer{ d_X, 0, x_sz * ne02 * ne03 } }, pc.size() * sizeof(uint32_t), pc.data(), { (uint32_t)(x_ne * ne02 * ne03), 1, 1});
  3178. }
  3179. if (y_non_contig) {
  3180. ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
  3181. }
  3182. uint32_t stride_batch_x = ne00*ne01;
  3183. uint32_t stride_batch_y = ne10*ne11;
  3184. if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) {
  3185. stride_batch_x = src0->nb[0] / ggml_type_size(src0->type);
  3186. }
  3187. if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
  3188. stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
  3189. }
  3190. // compute
  3191. ggml_vk_matmul_id(
  3192. ctx, subctx, pipeline,
  3193. { d_X, x_buf_offset, x_sz * ne02 * ne03 }, { d_Y, y_buf_offset, y_sz * ne12 * ne13 },
  3194. { d_D, d_buf_offset, d_sz * ne22 * ne23 }, { d_ids, ids_buf_offset, ids_sz },
  3195. ne01, ne21, ne10, ne10, ne10, ne01,
  3196. stride_batch_x, stride_batch_y, ne20*ne21,
  3197. n_as, nei0, nei1, nbi1 / ggml_type_size(ids->type), ne11
  3198. ); // NOLINT
  3199. }
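// Matrix-vector variant of the expert-routed (id) matmul: used when a single
// expert is selected per token (ids->ne[1] == 1); dispatches a
// dequantize-mul-mat-vec pipeline instead of the tiled matmul above.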
static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_vec_id_q_f16((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3];
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
    GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT
    GGML_ASSERT(ids->type == GGML_TYPE_I32);

    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];

    const uint64_t ne10 = src1->ne[0];
    const uint64_t ne11 = src1->ne[1];
    const uint64_t ne12 = src1->ne[2];
    const uint64_t ne13 = src1->ne[3];

    const uint64_t nei0 = ids->ne[0];
    const uint64_t nei1 = ids->ne[1];

    const uint64_t nbi2 = ids->nb[2];

    GGML_ASSERT(nei1 == 1);

    const uint64_t ne20 = dst->ne[0];
    const uint64_t ne21 = dst->ne[1];
    const uint64_t ne22 = dst->ne[2];
    const uint64_t ne23 = dst->ne[3];

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
    ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;

    vk_buffer d_Qx;
    size_t qx_buf_offset = 0;
    vk_buffer d_Qy;
    size_t qy_buf_offset = 0;
    vk_buffer d_ids;
    size_t ids_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool ids_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
        ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
        ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
        src0_uma = d_Qx != nullptr;
        src1_uma = d_Qy != nullptr;
        ids_uma = d_ids != nullptr;
    }

    const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);

    const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;

    const bool qx_needs_dequant = x_non_contig;
    const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !f16_f32_kernel) || y_non_contig;

    // Not implemented
    GGML_ASSERT(y_non_contig || !qy_needs_dequant); // NOLINT

    const uint64_t x_ne = ne01 * ne00;
    const uint64_t y_ne = ne11 * ne10;
    const uint64_t d_ne = ne21 * ne20;

    const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
    const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
    const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
    const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
    const uint64_t ids_sz = nbi2;
    const uint64_t d_sz = sizeof(float) * d_ne;

    vk_pipeline to_fp16_vk_0 = nullptr;
    vk_pipeline to_fp16_vk_1 = nullptr;
    if (x_non_contig) {
        to_fp16_vk_0 = ggml_vk_get_cpy_pipeline(ctx, src0->type, src0->type);
    }
    if (y_non_contig) {
        to_fp16_vk_1 = ggml_vk_get_cpy_pipeline(ctx, src1->type, src1->type);
    } else {
        to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type);
    }
    vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec_id(ctx, src0->type, src1->type);
    GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT
    GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT
    GGML_ASSERT(dmmv != nullptr);

    if (dryrun) {
        const uint64_t x_sz_upd = x_sz * ne02 * ne03;
        const uint64_t y_sz_upd = y_sz * ne12 * ne13;
        if (
                (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
                (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size)) {
            GGML_ABORT("Requested preallocation size is too large");
        }
        if (qx_needs_dequant && ctx->prealloc_size_x < x_sz_upd) {
            ctx->prealloc_size_x = x_sz_upd;
        }
        if (qy_needs_dequant && ctx->prealloc_size_y < y_sz_upd) {
            ctx->prealloc_size_y = y_sz_upd;
        }

        // Request descriptor sets
        if (qx_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1);
        }
        if (qy_needs_dequant) {
            ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1);
        }
        ggml_pipeline_request_descriptor_sets(ctx->device, dmmv, 1);
        return;
    }

    vk_buffer d_D = dst_buf_ctx->dev_buffer;
    const uint64_t d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
    GGML_ASSERT(d_D != nullptr);
    vk_buffer d_X;
    uint64_t x_buf_offset = 0;
    vk_buffer d_Y;
    uint64_t y_buf_offset = 0;
    if (!src0_uma) {
        d_Qx = src0_buf_ctx->dev_buffer;
        qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_Qx != nullptr);
    }
    if (!src1_uma) {
        d_Qy = src1_buf_ctx->dev_buffer;
        qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Qy != nullptr);
    }
    if (!ids_uma) {
        d_ids = ids_buf_ctx->dev_buffer;
        ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
        GGML_ASSERT(d_ids != nullptr);
    }
    if (qx_needs_dequant) {
        d_X = ctx->prealloc_x;
    } else {
        d_X = d_Qx;
        x_buf_offset = qx_buf_offset;
        GGML_ASSERT(qx_sz == x_sz);
    }
    if (qy_needs_dequant) {
        d_Y = ctx->prealloc_y;
    } else {
        d_Y = d_Qy;
        y_buf_offset = qy_buf_offset;
        GGML_ASSERT(qy_sz == y_sz);
    }

    if (x_non_contig) {
        GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, { d_Qx, qx_buf_offset, VK_WHOLE_SIZE }, { d_X, 0, VK_WHOLE_SIZE });
    }
    if (y_non_contig) {
        GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
        ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE });
    }

    uint32_t stride_batch_y = ne10*ne11;

    if (!ggml_vk_dim01_contiguous(src1) && !qy_needs_dequant) {
        stride_batch_y = src1->nb[0] / ggml_type_size(src1->type);
    }

    const uint32_t max_groups_x = ctx->device->properties.limits.maxComputeWorkGroupCount[0];

    uint32_t groups_x = ne01;
    uint32_t groups_z = 1;

    if (ne01 > max_groups_x) {
        groups_z = 64;
        groups_x /= groups_z;
    }

    // compute
    const vk_mat_vec_id_push_constants pc = {
        (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01,
        (uint32_t)x_ne, stride_batch_y, (uint32_t)(ne20*ne21),
        (uint32_t)nei0, (uint32_t)ne11,
    };
    ggml_vk_sync_buffers(subctx);
    ggml_vk_dispatch_pipeline(ctx, subctx, dmmv,
        { vk_subbuffer{ d_X, x_buf_offset, x_sz * ne02 * ne03 },
          vk_subbuffer{ d_Y, y_buf_offset, y_sz * ne12 * ne13 }, vk_subbuffer{ d_D, d_buf_offset, d_sz * ne22 * ne23}, vk_subbuffer{ d_ids, ids_buf_offset, ids_sz } },
        sizeof(vk_mat_vec_id_push_constants), &pc, { groups_x, (uint32_t)nei0, groups_z });
}
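// Entry point for MUL_MAT_ID: take the matrix-vector path when one expert is
// selected per token and src0 has a supported type, otherwise fall back to the
// full matrix-matrix path.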
static void ggml_vk_mul_mat_id(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_mul_mat_id(" << src0 << ", " << src1 << ", " << src2 << ", " << dst << ")");
    if (src2->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) {
        ggml_vk_mul_mat_vec_id_q_f16(ctx, subctx, src0, src1, src2, dst, dryrun);
    } else {
        ggml_vk_mul_mat_id_q_f16(ctx, subctx, src0, src1, src2, dst, dryrun);
    }
}
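// Map an op plus operand/destination types to its compute pipeline; returns
// nullptr when the combination is unsupported.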
static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op) {
    switch (op) {
    case GGML_OP_GET_ROWS:
        GGML_ASSERT(src1->type == GGML_TYPE_I32);
        if (dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_get_rows[src0->type];
        }
        if (dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_get_rows_f32[src0->type];
        }
        return nullptr;
    case GGML_OP_ACC:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_acc_f32;
        }
        return nullptr;
    case GGML_OP_ADD:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_add_f32;
        }
        if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_add_f16_f32_f16;
        }
        return nullptr;
    case GGML_OP_MUL:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_mul_f32;
        }
        return nullptr;
    case GGML_OP_DIV:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_div_f32;
        }
        return nullptr;
    case GGML_OP_CONCAT:
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_concat_f32;
        }
        if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_concat_f16;
        }
        if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_concat_i32;
        }
        return nullptr;
    case GGML_OP_UPSCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_upscale_f32;
        }
        return nullptr;
    case GGML_OP_SCALE:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_scale_f32;
        }
        return nullptr;
    case GGML_OP_SQR:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sqr_f32;
        }
        return nullptr;
    case GGML_OP_SIN:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sin_f32;
        }
        return nullptr;
    case GGML_OP_COS:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_cos_f32;
        }
        return nullptr;
    case GGML_OP_CLAMP:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_clamp_f32;
        }
        return nullptr;
    case GGML_OP_PAD:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_pad_f32;
        }
        return nullptr;
    case GGML_OP_REPEAT:
        if (ggml_type_size(src0->type) == sizeof(float) && ggml_type_size(dst->type) == sizeof(float)) {
            return ctx->device->pipeline_repeat_f32;
        }
        return nullptr;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        return ggml_vk_get_cpy_pipeline(ctx, src0->type, dst->type);
    case GGML_OP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_norm_f32;
        }
        return nullptr;
    case GGML_OP_GROUP_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_group_norm_f32;
        }
        return nullptr;
    case GGML_OP_RMS_NORM:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_rms_norm_f32;
        }
        return nullptr;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(dst)) {
        case GGML_UNARY_OP_SILU:
            if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                return ctx->device->pipeline_silu_f32;
            }
            break;
        case GGML_UNARY_OP_GELU:
            if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                return ctx->device->pipeline_gelu_f32;
            }
            break;
        case GGML_UNARY_OP_GELU_QUICK:
            if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                return ctx->device->pipeline_gelu_quick_f32;
            }
            break;
        case GGML_UNARY_OP_RELU:
            if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                return ctx->device->pipeline_relu_f32;
            }
            break;
        case GGML_UNARY_OP_TANH:
            if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                return ctx->device->pipeline_tanh_f32;
            }
            break;
        default:
            break;
        }
        return nullptr;
    case GGML_OP_DIAG_MASK_INF:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_diag_mask_inf_f32;
        }
        return nullptr;
    case GGML_OP_SOFT_MAX:
        GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);

        if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_soft_max_f32;
        }
        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_soft_max_f32_f16;
        }
        return nullptr;
    case GGML_OP_ROPE:
        {
            const int mode = ((const int32_t *) dst->op_params)[2];
            const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;

            if (is_neox) {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_neox_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_neox_f16;
                }
            } else {
                if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
                    return ctx->device->pipeline_rope_norm_f32;
                }
                if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
                    return ctx->device->pipeline_rope_norm_f16;
                }
            }
            return nullptr;
        }
    case GGML_OP_ARGSORT:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
            return ctx->device->pipeline_argsort_f32;
        }
        return nullptr;
    case GGML_OP_SUM_ROWS:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_sum_rows_f32;
        }
        return nullptr;
    case GGML_OP_IM2COL:
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_im2col_f32;
        }
        if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
            return ctx->device->pipeline_im2col_f32_f16;
        }
        return nullptr;
    case GGML_OP_TIMESTEP_EMBEDDING:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_timestep_embedding_f32;
        }
        return nullptr;
    case GGML_OP_LEAKY_RELU:
        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
            return ctx->device->pipeline_leaky_relu_f32;
        }
        return nullptr;
    default:
        return nullptr;
    }

    GGML_UNUSED(src2);
}
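// Ops whose shaders take full stride information and can therefore operate on
// non-contiguous tensors.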
static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
    switch (op) {
    case GGML_OP_CPY:
    case GGML_OP_GET_ROWS:
    case GGML_OP_ADD:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_REPEAT:
        return true;
    default:
        return false;
    }
}
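// Generic dispatch path for the ops above: resolves the pipeline, sizes the
// operand subbuffers (falling back to VK_WHOLE_SIZE where a view reaches the
// end of its buffer), picks the workgroup grid per op and submits a single
// dispatch. PC is the push-constant struct expected by the shader; the
// wrappers below show the call shape, e.g.
//   ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr,
//       dst, GGML_OP_SUM_ROWS, { (uint32_t)src0->ne[0], 0, 0.0f, 0.0f }, dryrun);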
template<typename PC>
static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op, const PC&& pc, bool dryrun = false) {
    VK_LOG_DEBUG("ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
    if (src1 != nullptr) {
        std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
    }
    if (src2 != nullptr) {
        std::cerr << "), (" << src2 << ", name=" << src2->name << ", type=" << src2->type << ", ne0=" << src2->ne[0] << ", ne1=" << src2->ne[1] << ", ne2=" << src2->ne[2] << ", ne3=" << src2->ne[3] << ", nb0=" << src2->nb[0] << ", nb1=" << src2->nb[1] << ", nb2=" << src2->nb[2] << ", nb3=" << src2->nb[3];
    }
    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
    std::cerr << "), " << ggml_op_name(op) << ", " << (dryrun ? "dryrun" : "") << ")");
    GGML_ASSERT(op == GGML_OP_GET_ROWS || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type)))); // NOLINT
    GGML_ASSERT(ggml_vk_op_supports_incontiguous(op) || ggml_vk_dim01_contiguous(src0)); // NOLINT
    GGML_ASSERT(dst->buffer != nullptr);
    const uint64_t ne00 = src0->ne[0];
    const uint64_t ne01 = src0->ne[1];
    const uint64_t ne02 = src0->ne[2];
    const uint64_t ne03 = src0->ne[3];
    const uint64_t ne0 = ne00 * ne01;

    const bool use_src1 = src1 != nullptr;
    const uint64_t ne10 = use_src1 ? src1->ne[0] : 0;
    const uint64_t ne11 = use_src1 ? src1->ne[1] : 0;
    const uint64_t ne12 = use_src1 ? src1->ne[2] : 0;
    const uint64_t ne13 = use_src1 ? src1->ne[3] : 0;
    const uint64_t ne1 = ne10 * ne11;
    // const uint64_t nb10 = use_src1 ? src1->nb[0] : 0;

    const bool use_src2 = src2 != nullptr;
    const uint64_t ne20 = use_src2 ? src2->ne[0] : 0;
    const uint64_t ne21 = use_src2 ? src2->ne[1] : 0;
    const uint64_t ne22 = use_src2 ? src2->ne[2] : 0;
    const uint64_t ne23 = use_src2 ? src2->ne[3] : 0;
    const uint64_t ne2 = ne20 * ne21;

    const uint64_t ned0 = dst->ne[0];
    const uint64_t ned1 = dst->ne[1];
    const uint64_t ned2 = dst->ne[2];
    const uint64_t ned3 = dst->ne[3];
    const uint64_t ned = ned0 * ned1;

    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, op);

    if (pipeline == nullptr) {
        std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(op) << " for " << ggml_type_name(src0->type);
        if (src1 != nullptr) {
            std::cerr << " and " << ggml_type_name(src1->type);
        }
        std::cerr << " to " << ggml_type_name(dst->type) << std::endl;
        GGML_ABORT("fatal error");
    }

    if (dryrun) {
        ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
        return;
    }

    const bool op_supports_incontiguous = ggml_vk_op_supports_incontiguous(op);

    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
    ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
    ggml_backend_vk_buffer_context * src1_buf_ctx = use_src1 ? (ggml_backend_vk_buffer_context *)src1->buffer->context : nullptr;
    ggml_backend_vk_buffer_context * src2_buf_ctx = use_src2 ? (ggml_backend_vk_buffer_context *)src2->buffer->context : nullptr;

    vk_buffer d_X = nullptr;
    size_t x_buf_offset = 0;
    vk_buffer d_Y = nullptr;
    size_t y_buf_offset = 0;
    vk_buffer d_Z = nullptr;
    size_t z_buf_offset = 0;

    bool src0_uma = false;
    bool src1_uma = false;
    bool src2_uma = false;

    if (ctx->device->uma) {
        ggml_vk_host_get(ctx->device, src0->data, d_X, x_buf_offset);
        src0_uma = d_X != nullptr;
        if (use_src1) {
            ggml_vk_host_get(ctx->device, src1->data, d_Y, y_buf_offset);
            src1_uma = d_Y != nullptr;
        }
        if (use_src2) {
            ggml_vk_host_get(ctx->device, src2->data, d_Z, z_buf_offset);
            src2_uma = d_Z != nullptr;
        }
    }

    uint64_t x_sz = ggml_type_size(src0->type)/ggml_blck_size(src0->type) * ne0;
    uint64_t y_sz = use_src1 ? ggml_type_size(src1->type) * ne1 : 0;
    uint64_t z_sz = use_src2 ? ggml_type_size(src2->type) * ne2 : 0;
    uint64_t d_sz = ggml_type_size(dst->type) * ned;

    vk_buffer d_D = dst_buf_ctx->dev_buffer;

    // Workaround for tiny tensor inputs on ROPE
    if (op == GGML_OP_ROPE && use_src1 && y_sz > d_D->size) {
        y_sz = VK_WHOLE_SIZE;
    }

    GGML_ASSERT(d_D != nullptr);
    uint64_t d_buf_offset = ((vk_tensor_offset(dst) + dst->view_offs) / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
    GGML_ASSERT(d_buf_offset == vk_tensor_offset(dst) || op == GGML_OP_CPY); // NOLINT
    if (!src0_uma) {
        d_X = src0_buf_ctx->dev_buffer;
        x_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
        GGML_ASSERT(d_X != nullptr);
    }
    if (use_src1 && !src1_uma) {
        d_Y = src1_buf_ctx->dev_buffer;
        y_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
        GGML_ASSERT(d_Y != nullptr);
    }
    if (use_src2 && !src2_uma) {
        d_Z = src2_buf_ctx->dev_buffer;
        z_buf_offset = vk_tensor_offset(src2) + src2->view_offs;
        GGML_ASSERT(d_Z != nullptr);
    }

    if (op_supports_incontiguous) {
        x_sz = ggml_nbytes(src0);
        y_sz = use_src1 ? ggml_nbytes(src1) : 0;
        z_sz = use_src2 ? ggml_nbytes(src2) : 0;
        d_sz = ggml_nbytes(dst);

        if (x_buf_offset + x_sz >= d_X->size) {
            x_sz = VK_WHOLE_SIZE;
        }
        if (use_src1 && y_buf_offset + y_sz >= d_Y->size) {
            y_sz = VK_WHOLE_SIZE;
        }
        if (use_src2 && z_buf_offset + z_sz >= d_Z->size) {
            z_sz = VK_WHOLE_SIZE;
        }
        if (d_buf_offset + d_sz >= d_D->size) {
            d_sz = VK_WHOLE_SIZE;
        }
    }

    std::array<uint32_t, 3> elements;

    // Single call if dimension 2 is contiguous
    GGML_ASSERT(op_supports_incontiguous || (ggml_is_contiguous(src0) && (src1 == nullptr || ggml_is_contiguous(src1))));

    switch (op) {
    case GGML_OP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_SUM_ROWS:
        {
            const uint32_t nr = ggml_nrows(src0);
            if (nr > 262144) {
                elements = { 512, 512, CEIL_DIV(nr, 262144) };
            } else if (nr > 512) {
                elements = { 512, CEIL_DIV(nr, 512), 1 };
            } else {
                elements = { nr, 1, 1 };
            }
        } break;
    case GGML_OP_GROUP_NORM:
        {
            const uint32_t num_groups = dst->op_params[0];
            elements = { num_groups * (uint32_t)src0->ne[3], 1, 1 };
        } break;
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_ROPE:
        elements = { (uint32_t)ggml_nrows(src0), (uint32_t)ne00, 1 };
        break;
    case GGML_OP_GET_ROWS:
        elements = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)(ne11 * ne12) };
        break;
    case GGML_OP_ARGSORT:
        elements = { (uint32_t)ne00, (uint32_t)ggml_nrows(src0), 1 };
        break;
    case GGML_OP_IM2COL:
        {
            const bool is_2D = dst->op_params[6] == 1;

            const uint32_t IC = src1->ne[is_2D ? 2 : 1];

            const uint32_t KH = is_2D ? src0->ne[1] : 1;
            const uint32_t KW = src0->ne[0];

            const uint32_t OH = is_2D ? dst->ne[2] : 1;
            const uint32_t OW = dst->ne[1];

            const uint32_t batch = src1->ne[3];

            elements = { OW * KW * KH, OH, batch * IC };
        } break;
    case GGML_OP_TIMESTEP_EMBEDDING:
        {
            const uint32_t dim = dst->op_params[0];
            uint32_t half_ceil = (dim + 1) / 2;
            elements = { half_ceil, (uint32_t)src0->ne[0], 1 };
        } break;
    case GGML_OP_ADD:
    case GGML_OP_DIV:
    case GGML_OP_MUL:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_REPEAT:
    case GGML_OP_CPY:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_UNARY:
        {
            const uint32_t ne = ggml_nelements(dst);
            if (ne > 262144) {
                elements = { 512, 512, CEIL_DIV(ne, 262144) };
            } else if (ne > 512) {
                elements = { 512, CEIL_DIV(ne, 512), 1 };
            } else {
                elements = { ne, 1, 1 };
            }
        } break;
    default:
        elements = { (uint32_t)ggml_nelements(src0), 1, 1 };
        break;
    }

    if (!op_supports_incontiguous) {
        if (x_sz != VK_WHOLE_SIZE) {
            x_sz *= ne02 * ne03;
        }
        if (use_src1 && y_sz != VK_WHOLE_SIZE) {
            y_sz *= ne12 * ne13;
        }
        if (use_src2 && z_sz != VK_WHOLE_SIZE) {
            z_sz *= ne22 * ne23;
        }
        if (d_sz != VK_WHOLE_SIZE) {
            d_sz *= ned2 * ned3;
        }
    }

    if (op == GGML_OP_SOFT_MAX) {
        // Empty src1 is possible in soft_max, but the shader needs a buffer
        vk_subbuffer subbuf_y;
        if (use_src1) {
            subbuf_y = { d_Y, y_buf_offset, y_sz };
        } else {
            subbuf_y = { d_X, 0, x_sz };
        }

        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, subbuf_y, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
    } else if (op == GGML_OP_ROPE) {
        // Empty src2 is possible in rope, but the shader needs a buffer
        vk_subbuffer subbuf_z;
        if (use_src2) {
            subbuf_z = { d_Z, z_buf_offset, z_sz };
        } else {
            subbuf_z = { d_X, 0, x_sz };
        }

        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, subbuf_z, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
    } else if (op == GGML_OP_IM2COL) {
        // im2col uses only src1 and dst buffers
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
    } else if (use_src2) {
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_Z, z_buf_offset, z_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
    } else if (use_src1) {
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_Y, y_buf_offset, y_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
    } else {
        ggml_vk_sync_buffers(subctx);
        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { vk_subbuffer{ d_X, x_buf_offset, x_sz }, vk_subbuffer{ d_D, d_buf_offset, d_sz } }, sizeof(PC), &pc, elements);
    }
}
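// Thin per-op wrappers: each one packs the op-specific push constants (extents
// and strides in elements, not bytes) and forwards to ggml_vk_op_f32.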
static void ggml_vk_get_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_GET_ROWS, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}
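// ACC accumulates src1 into a view of src0: the view strides (nb1/nb2) and the
// start offset are carried in dst->op_params.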
static void ggml_vk_acc(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);
    const uint32_t d_offset = ((vk_tensor_offset(dst) + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;

    int nb1 = dst->op_params[0] / 4; // view stride in bytes, converted to float32 elements
    int nb2 = dst->op_params[1] / 4; // view stride in bytes, converted to float32 elements
    // int nb3 = dst->op_params[2] / 4; // unused
    int offset = dst->op_params[3] / 4; // view offset in bytes, converted to float32 elements

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_ACC, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t)nb1, (uint32_t)nb2, (uint32_t) dst->nb[3] / dst_type_size,
        d_offset,
        0.0f, 0.0f, offset,
    }, dryrun);
}
static void ggml_vk_add(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_ADD, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}
static void ggml_vk_mul(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_MUL, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}
static void ggml_vk_div(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_DIV, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, 0,
    }, dryrun);
}
static void ggml_vk_concat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    int * op_params = (int *)dst->op_params;

    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t src1_type_size = ggml_type_size(src1->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_binary_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_CONCAT, {
        (uint32_t)ggml_nelements(dst),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2], (uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f, op_params[0],
    }, dryrun);
}
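// UPSCALE: the per-dimension scale factors are the dst/src extent ratios.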
static void ggml_vk_upscale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);

    const float sf0 = (float)dst->ne[0] / src0->ne[0];
    const float sf1 = (float)dst->ne[1] / src0->ne[1];
    const float sf2 = (float)dst->ne[2] / src0->ne[2];
    const float sf3 = (float)dst->ne[3] / src0->ne[3];

    ggml_vk_op_f32<vk_op_upscale_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UPSCALE, {
        (uint32_t)ggml_nelements(dst), 0,
        (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t)dst->ne[0], (uint32_t)dst->ne[1], (uint32_t)dst->ne[2], (uint32_t)dst->ne[3],
        sf0, sf1, sf2, sf3,
    }, dryrun);
}
static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SCALE, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        op_params[0], 0.0f
    }, dryrun);
}
static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQR, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    }, dryrun);
}
static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SIN, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    }, dryrun);
}
static void ggml_vk_cos(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_COS, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    }, dryrun);
}
static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CLAMP, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        op_params[0], op_params[1],
    }, dryrun);
}
static void ggml_vk_pad(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_PAD, {
        (uint32_t)ggml_nelements(dst),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    }, dryrun);
}
static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_REPEAT, {
        (uint32_t)ggml_nelements(dst),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        0,
        0.0f, 0.0f,
    }, dryrun);
}
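// CPY may write to a view whose offset is not aligned to
// minStorageBufferOffsetAlignment; the residual offset is passed to the shader
// in elements while the subbuffer base stays aligned.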
static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t src0_type_size = ggml_type_size(src0->type);
    const uint32_t dst_type_size = ggml_type_size(dst->type);
    const uint32_t d_offset = ((vk_tensor_offset(dst) + dst->view_offs) % ctx->device->properties.limits.minStorageBufferOffsetAlignment) / dst_type_size;

    ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_CPY, {
        (uint32_t)ggml_nelements(src0),
        (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
        (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
        d_offset,
        0.0f, 0.0f,
    }, dryrun);
}
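// The *_norm ops pass eps through the generic push-constant struct; GROUP_NORM
// additionally derives the per-group element count on the host.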
static void ggml_vk_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun);
}
static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const int * int_op_params = (const int *)dst->op_params;
    const float * float_op_params = (const float *)dst->op_params;

    const uint32_t num_groups = int_op_params[0];
    const float eps = float_op_params[1];
    const uint32_t group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_GROUP_NORM, { group_size, 0, eps, 0.0f }, dryrun);
}
static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;

    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun);
}
static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f }, dryrun);
}
static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    int32_t * op_params = (int32_t *)dst->op_params;

    ggml_vk_op_f32<vk_op_diag_mask_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] }, dryrun);
}
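// SOFT_MAX: scale and max_bias come from op_params; m0/m1 are the ALiBi slope
// bases computed from max_bias and the head count, with src1 as the optional mask.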
static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;

    float scale = op_params[0];
    float max_bias = op_params[1];

    const uint32_t ncols = (uint32_t)src0->ne[0];
    const uint32_t nrows_x = (uint32_t)ggml_nrows(src0);
    const uint32_t nrows_y = (uint32_t)src0->ne[1];

    const uint32_t n_head_kv = nrows_x/nrows_y;
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    ggml_vk_op_f32<vk_op_soft_max_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_SOFT_MAX, {
        ncols,
        src1 != nullptr ? nrows_y : (uint32_t)0,
        scale, max_bias,
        m0, m1,
        n_head_log2,
    }, dryrun);
}
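// ROPE: rotation parameters are unpacked from op_params; the YaRN correction
// dims are precomputed on the host with ggml_rope_yarn_corr_dims.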
  4077. static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool dryrun = false) {
  4078. const int n_dims = ((int32_t *) dst->op_params)[1];
  4079. // const int mode = ((int32_t *) dst->op_params)[2];
  4080. // const int n_ctx = ((int32_t *) dst->op_params)[3];
  4081. const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
  4082. const float freq_base = ((float *) dst->op_params)[5];
  4083. const float freq_scale = ((float *) dst->op_params)[6];
  4084. const float ext_factor = ((float *) dst->op_params)[7];
  4085. const float attn_factor = ((float *) dst->op_params)[8];
  4086. const float beta_fast = ((float *) dst->op_params)[9];
  4087. const float beta_slow = ((float *) dst->op_params)[10];
  4088. float corr_dims[2];
  4089. ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
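
    // corr_dims holds the YaRN ramp boundaries: the range of rotary
    // dimensions over which interpolation blends into extrapolation.
    // theta_scale below is the standard RoPE frequency ratio
    // freq_base^(-2/n_dims) between consecutive dimension pairs.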
    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    ggml_vk_op_f32<vk_op_rope_push_constants>(ctx, subctx, src0, src1, src2, dst, GGML_OP_ROPE, {
        (uint32_t)src0->ne[0], (uint32_t)n_dims, freq_scale, (uint32_t)src0->ne[1],
        freq_base, ext_factor, attn_factor, {corr_dims[0], corr_dims[1]}, theta_scale,
        src2 != nullptr,
    }, dryrun);
}

static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    int32_t * op_params = (int32_t *)dst->op_params;

    uint32_t ncols = src0->ne[0];

    uint32_t ncols_pad = 1;
    while (ncols_pad < ncols) {
        ncols_pad *= 2;
    }
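
    // The argsort shader is a bitonic sort, which requires the row length to
    // be padded up to a power of two; the assert below presumably mirrors the
    // shader's fixed workgroup/shared-memory capacity.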
    GGML_ASSERT(ncols_pad <= 1024);

    ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
        ncols,
        ncols_pad,
        op_params[0],
    }, dryrun);
}

static void ggml_vk_sum_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, { (uint32_t)src0->ne[0], 0, 0.0f, 0.0f }, dryrun);
}

static void ggml_vk_im2col(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    const int32_t s0 = dst->op_params[0];
    const int32_t s1 = dst->op_params[1];
    const int32_t p0 = dst->op_params[2];
    const int32_t p1 = dst->op_params[3];
    const int32_t d0 = dst->op_params[4];
    const int32_t d1 = dst->op_params[5];

    const bool is_2D = dst->op_params[6] == 1;
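
    // In the 2D case src1 is the input image (W, H, C, N) and src0 the kernel
    // (KW, KH, ...); in the 1D case the height/KH dimensions collapse to 1
    // and the channel count is read one dimension lower, as the ternaries
    // below show.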
    const uint32_t IC = src1->ne[is_2D ? 2 : 1];
    const uint32_t IH = is_2D ? src1->ne[1] : 1;
    const uint32_t IW = src1->ne[0];

    const uint32_t KH = is_2D ? src0->ne[1] : 1;
    const uint32_t KW = src0->ne[0];

    const uint32_t OH = is_2D ? dst->ne[2] : 1;
    const uint32_t OW = dst->ne[1];

    const uint32_t offset_delta = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
    const uint32_t batch_offset = src1->nb[3] / 4;             // nb is byte offset, src is type float32

    const uint32_t pelements = OW * KW * KH;

    ggml_vk_op_f32<vk_op_im2col_push_constants>(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_IM2COL, {
        batch_offset, offset_delta,
        IC, IW, IH, OW, OH, KW, KH,
        pelements,
        IC * KH * KW,
        s0, s1, p0, p1, d0, d1,
    }, dryrun);
}

static void ggml_vk_timestep_embedding(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const uint32_t dim = dst->op_params[0];
    const uint32_t max_period = dst->op_params[1];
    const uint32_t nb1 = dst->nb[1] / ggml_type_size(dst->type);

    ggml_vk_op_f32<vk_op_timestep_embedding_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_TIMESTEP_EMBEDDING, {
        nb1, dim, max_period,
    }, dryrun);
}

static void ggml_vk_leaky_relu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
    const float * op_params = (const float *)dst->op_params;
    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_LEAKY_RELU, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f }, dryrun);
}

#ifdef GGML_VULKAN_RUN_TESTS
static void ggml_vk_print_matrix_area(const void * data, ggml_type type, int ne0, int ne1, int i0, int i1, int i2) {
    if (type != GGML_TYPE_F32 && type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    fprintf(stderr, " ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < ne0 && idx1 >= 0 && idx1 < ne1) {
                float val;
                if (type == GGML_TYPE_F32) {
                    val = *((const float *) data + i2*ne1*ne0 + idx1*ne0 + idx0);
                } else if (type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*((const ggml_fp16_t *) data + i2*ne1*ne0 + idx1*ne0 + idx0));
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, " ");
            }
        }
        fprintf(stderr, "\n");
    }
}
template <typename X_TYPE, typename Y_TYPE>
static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, int split_k, int shader_size) {
    VK_LOG_DEBUG("ggml_vk_test_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << shader_size << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_s;
            shname = "F32_ALIGNED_S";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_s;
            shname = "F32_F16_ALIGNED_S";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32->a_s;
            shname = "F16_F32_ALIGNED_S";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16->a_s;
            shname = "F16_ALIGNED_S";
        } else {
            GGML_ABORT("fatal error");
        }
    } else if (shader_size == 1) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_m;
            shname = "F32_ALIGNED_M";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_m;
            shname = "F32_F16_ALIGNED_M";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32->a_m;
            shname = "F16_F32_ALIGNED_M";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16->a_m;
            shname = "F16_ALIGNED_M";
        } else {
            GGML_ABORT("fatal error");
        }
    } else if (shader_size == 2) {
        if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32->a_l;
            shname = "F32_ALIGNED_L";
        } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f32_f16->a_l;
            shname = "F32_F16_ALIGNED_L";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16_f32->a_l;
            shname = "F16_F32_ALIGNED_L";
        } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
            p = ctx->device->pipeline_matmul_f16->a_l;
            shname = "F16_ALIGNED_L";
        } else {
            GGML_ABORT("fatal error");
        }
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = ggml_vk_align_size(k, p->align);

    if (k != kpad) {
        if (shader_size == 0) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->s;
                shname = "F32_S";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->s;
                shname = "F32_F16_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->s;
                shname = "F16_F32_S";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->s;
                shname = "F16_S";
            }
        } else if (shader_size == 1) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->m;
                shname = "F32_M";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->m;
                shname = "F32_F16_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->m;
                shname = "F16_F32_M";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->m;
                shname = "F16_M";
            }
        } else if (shader_size == 2) {
            if (std::is_same<float, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32->l;
                shname = "F32_L";
            } else if (std::is_same<float, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f32_f16->l;
                shname = "F32_F16_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<float, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16_f32->l;
                shname = "F16_F32_L";
            } else if (std::is_same<ggml_fp16_t, X_TYPE>() && std::is_same<ggml_fp16_t, Y_TYPE>()) {
                p = ctx->device->pipeline_matmul_f16->l;
                shname = "F16_L";
            }
        }
    }
    ggml_pipeline_request_descriptor_sets(ctx->device, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    }

    ggml_pipeline_allocate_descriptor_sets(ctx->device);

    vk_buffer d_X = ggml_vk_create_buffer_check(ctx->device, sizeof(X_TYPE) * x_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_Y = ggml_vk_create_buffer_check(ctx->device, sizeof(Y_TYPE) * y_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_D = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne, vk::MemoryPropertyFlagBits::eDeviceLocal);

    X_TYPE* x = (X_TYPE *) malloc(sizeof(X_TYPE) * x_ne);
    Y_TYPE* y = (Y_TYPE *) malloc(sizeof(Y_TYPE) * y_ne);
    float* d = (float *) malloc(sizeof(float) * d_ne);

    for (size_t i = 0; i < x_ne; i++) {
        if (std::is_same<float, X_TYPE>()) {
            x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
        } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
            x[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
        } else {
            GGML_ABORT("fatal error");
        }
    }
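
    // Y is filled with an identity pattern (1.0 on the k-diagonal, 0.0
    // elsewhere) rather than random data, which makes mismatches in the
    // product easy to spot; the commented-out lines keep the random
    // alternative.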
    for (size_t i = 0; i < y_ne; i++) {
        if (std::is_same<float, Y_TYPE>()) {
            // y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
            y[i] = (i % k == i / k) ? 1.0f : 0.0f;
        } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
            // y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
            y[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
        } else {
            GGML_ABORT("fatal error");
        }
    }

    ggml_vk_buffer_write(d_X, 0, x, sizeof(X_TYPE) * k * m * batch);
    ggml_vk_buffer_write(d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    for (size_t i = 0; i < num_it; i++) {
        ggml_vk_ctx_begin(ctx->device, subctx);
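        // Scalar arguments after the buffers: M, N, K, followed by what
        // appear to be the row strides of X, Y and D, their per-matrix batch
        // strides, then split_k and the batch/broadcast counts (a reading of
        // ggml_vk_matmul's parameter list, not documented here).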
        ggml_vk_matmul(
            ctx, subctx, p, ggml_vk_subbuffer(d_X), ggml_vk_subbuffer(d_Y), ggml_vk_subbuffer(d_D), ggml_vk_subbuffer(ctx->prealloc_split_k),
            m, n, k,
            k, k, m, k*m, k*n, m*n,
            split_k, batch, batch, batch, 1, 1
        );
        ggml_vk_ctx_end(subctx);
    }

    auto begin = std::chrono::high_resolution_clock::now();
    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();
    double time = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;

    // copy dst to host
    ggml_vk_buffer_read(d_D, 0, d, sizeof(float) * d_ne);

    float * d_chk = (float *) malloc(sizeof(float) * d_ne);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_type src0_type;
    ggml_type src1_type;

    if (std::is_same<float, X_TYPE>()) {
        src0_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
        src0_type = GGML_TYPE_F16;
    } else {
        GGML_ABORT("fatal error");
    }
    if (std::is_same<float, Y_TYPE>()) {
        src1_type = GGML_TYPE_F32;
    } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
        src1_type = GGML_TYPE_F16;
    } else {
        GGML_ABORT("fatal error");
    }

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, src0_type, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, src1_type, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = x;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;

        if (err > 0.05f && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }

    avg_err /= m * n * batch; // the error sum runs over all batches, so normalize by the full element count

    double tflops = 2.0*m*n*k*batch*num_it / (time / 1000.0) / (1000.0*1000.0*1000.0*1000.0);

    std::cerr << "TEST " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n + 15, first_err_b);
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    free(d_chk);

    ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue);
    ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue);

    ggml_vk_destroy_buffer(d_X);
    ggml_vk_destroy_buffer(d_Y);
    ggml_vk_destroy_buffer(d_D);

    ggml_pipeline_cleanup(p);
    ggml_pipeline_cleanup(ctx->device->pipeline_matmul_split_k_reduce);

    free(x);
    free(y);
    free(d);
}
static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, " ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(float *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor->data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, " ");
            }
        }
        fprintf(stderr, "\n");
    }
}
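
// CPU-side helpers for the tests below: quantization goes through
// ggml_quantize_chunk (start offset 0, a single "row" of ne values, no
// importance matrix), and dequantization uses the type's to_float trait.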
static void ggml_vk_quantize_data(const float * from, void * to, size_t ne, ggml_type quant) {
    ggml_quantize_chunk(quant, from, to, 0, 1, ne, nullptr);
}

static void ggml_vk_dequantize_data(const void * from, float * to, size_t ne, ggml_type quant) {
    if (quant == GGML_TYPE_F32) {
        memcpy(to, from, sizeof(float) * ne);
        return;
    }

    const auto * tt = ggml_get_type_traits(quant);

    ggml_to_float_t dequant_fn = tt->to_float;

    dequant_fn(from, to, ne);
}

static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_type quant) {
    VK_LOG_DEBUG("ggml_vk_test_dequant(" << ne << ")");
    const size_t x_sz = sizeof(float) * ne;
    const size_t x_sz_f16 = sizeof(ggml_fp16_t) * ne;
    const size_t qx_sz = ne * ggml_type_size(quant)/ggml_blck_size(quant);
    float * x = (float *) malloc(x_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer x_buf = ggml_vk_create_buffer_check(ctx->device, x_sz_f16, vk::MemoryPropertyFlagBits::eDeviceLocal);
    float * x_ref = (float *) malloc(x_sz);
    ggml_fp16_t * x_chk = (ggml_fp16_t *) malloc(x_sz_f16);

    for (size_t i = 0; i < ne; i++) {
        x[i] = rand() / (float)RAND_MAX;
    }

    vk_pipeline p = ggml_vk_get_to_fp16(ctx, quant);

    ggml_vk_quantize_data(x, qx, ne, quant);
    ggml_vk_dequantize_data(qx, x_ref, ne, quant);

    ggml_pipeline_request_descriptor_sets(ctx->device, p, 1);

    ggml_pipeline_allocate_descriptor_sets(ctx->device);

    ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    ggml_vk_ctx_begin(ctx->device, subctx);
    const std::vector<uint32_t> pc = { 1, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne };
    ggml_vk_dispatch_pipeline(ctx, subctx, p, { vk_subbuffer{ qx_buf, 0, qx_sz }, vk_subbuffer{ x_buf, 0, x_sz_f16 } }, pc.size() * sizeof(int), pc.data(), { (uint32_t)ne, 1, 1});
    ggml_vk_ctx_end(subctx);

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();

    double ms_dequant = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;
    ggml_vk_buffer_read(x_buf, 0, x_chk, x_sz_f16);

    int first_err = -1;

    double avg_err = 0.0;
    for (size_t i = 0; i < ne; i++) {
        double error = std::fabs(x_ref[i] - ggml_fp16_to_fp32(x_chk[i]));
        avg_err += error;

        if (first_err < 0 && error > 0.05) {
            first_err = i;
        }
    }

    avg_err /= ne;

    std::cerr << "TEST DEQUANT " << ggml_type_name(quant) << " time=" << ms_dequant << "ms avg_err=" << avg_err << std::endl;

    if (avg_err > 0.1) {
        std::cerr << "first_error = " << first_err << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << ggml_fp16_to_fp32(x_chk[i]) << ", ";
        }
        std::cerr << std::endl << "Expected result: " << std::endl << std::endl;
        for (int i = std::max(0, first_err - 5); i < std::min((int)ne, first_err + 5); i++) {
            std::cerr << x_ref[i] << ", ";
        }
        std::cerr << std::endl;
    }

    ggml_vk_destroy_buffer(x_buf);
    ggml_vk_destroy_buffer(qx_buf);

    free(x);
    free(qx);
    free(x_ref);
    free(x_chk);
}
static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, size_t n, size_t k, size_t batch, size_t num_it, size_t split_k, size_t shader_size, ggml_type quant) {
    VK_LOG_DEBUG("ggml_vk_test_dequant_matmul(" << m << ", " << n << ", " << k << ", " << batch << ", " << num_it << ", " << split_k << ", " << ggml_type_name(quant) << ")");
    const size_t x_ne = m * k * batch;
    const size_t y_ne = k * n * batch;
    const size_t d_ne = m * n * batch;

    vk_pipeline p;
    std::string shname;
    if (shader_size == 0) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_s;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_S";
    } else if (shader_size == 1) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_m;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_M";
    } else if (shader_size == 2) {
        p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->a_l;
        shname = std::string(ggml_type_name(quant)) + "_ALIGNED_L";
    } else {
        GGML_ASSERT(0);
    }

    const size_t kpad = ggml_vk_align_size(k, p->align);

    if (k != kpad) {
        if (shader_size == 0) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->s;
            shname = std::string(ggml_type_name(quant)) + "_S";
        } else if (shader_size == 1) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->m;
            shname = std::string(ggml_type_name(quant)) + "_M";
        } else if (shader_size == 2) {
            p = ctx->device->pipeline_dequant_mul_mat_mat[quant]->l;
            shname = std::string(ggml_type_name(quant)) + "_L";
        } else {
            GGML_ASSERT(0);
        }
    }

    const size_t x_sz = sizeof(float) * x_ne;
    const size_t y_sz = sizeof(float) * y_ne;
    const size_t qx_sz = x_ne * ggml_type_size(quant)/ggml_blck_size(quant);
    const size_t d_sz = sizeof(float) * d_ne;
    float * x = (float *) malloc(x_sz);
    float * y = (float *) malloc(y_sz);
    void * qx = malloc(qx_sz);
    vk_buffer qx_buf = ggml_vk_create_buffer_check(ctx->device, qx_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer y_buf = ggml_vk_create_buffer_check(ctx->device, y_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    vk_buffer d_buf = ggml_vk_create_buffer_check(ctx->device, d_sz, vk::MemoryPropertyFlagBits::eDeviceLocal);
    float * d = (float *) malloc(d_sz);
    float * d_chk = (float *) malloc(d_sz);

    for (size_t i = 0; i < x_ne; i++) {
        x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
    }

    ggml_vk_quantize_data(x, qx, x_ne, quant);

    for (size_t i = 0; i < y_ne; i++) {
        // y[i] = rand() / (float)RAND_MAX;
        y[i] = (i % k == i / k) ? 1.0f : 0.0f;
    }

    ggml_pipeline_request_descriptor_sets(ctx->device, p, num_it);
    if (split_k > 1) {
        ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, num_it);

        if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) {
            // Resize buffer
            if (ctx->prealloc_split_k != nullptr) {
                ggml_vk_destroy_buffer(ctx->prealloc_split_k);
            }
            ctx->prealloc_split_k = ggml_vk_create_buffer_check(ctx->device, sizeof(float) * d_ne * split_k, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    }

    ggml_pipeline_allocate_descriptor_sets(ctx->device);

    ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz);
    ggml_vk_buffer_write(y_buf, 0, y, y_sz);

    vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
    for (size_t i = 0; i < num_it; i++) {
        ggml_vk_ctx_begin(ctx->device, subctx);
        ggml_vk_matmul(
            ctx, subctx, p, ggml_vk_subbuffer(qx_buf), ggml_vk_subbuffer(y_buf), ggml_vk_subbuffer(d_buf), ggml_vk_subbuffer(ctx->prealloc_split_k),
            m, n, k,
            k, k, m, k*m, k*n, m*n,
            split_k, batch, batch, batch, 1, 1
        );
        ggml_vk_ctx_end(subctx);
    }

    auto begin = std::chrono::high_resolution_clock::now();

    ggml_vk_submit(subctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant_matmul waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    auto end = std::chrono::high_resolution_clock::now();

    double time_ms = std::chrono::duration_cast<std::chrono::microseconds>(end-begin).count() / 1000.0;

    ggml_vk_buffer_read(d_buf, 0, d, d_sz);

    ggml_init_params iparams = {
        /*.mem_size   =*/ 1024*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context * ggml_ctx = ggml_init(iparams);

    ggml_tensor * src0_ggml = ggml_new_tensor_3d(ggml_ctx, quant, k, m, batch);
    ggml_tensor * src1_ggml = ggml_new_tensor_3d(ggml_ctx, GGML_TYPE_F32, k, n, batch);
    ggml_tensor * tensor_ggml = ggml_mul_mat(ggml_ctx, src0_ggml, src1_ggml);

    src0_ggml->data = qx;
    src1_ggml->data = y;
    tensor_ggml->data = d_chk;

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_ggml);

    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 1);

    ggml_free(ggml_ctx);

    double avg_err = 0.0;
    int first_err_n = -1;
    int first_err_m = -1;
    int first_err_b = -1;

    for (size_t i = 0; i < m*n*batch; i++) {
        double err = std::fabs(d[i] - d_chk[i]);
        avg_err += err;

        if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
            first_err_b = i / (m * n);
            first_err_n = (i % (m * n)) / m;
            first_err_m = (i % (m * n)) % m;
        }
    }

    avg_err /= m * n * batch; // the error sum runs over all batches, so normalize by the full element count

    double tflops = 2.0*m*n*k*batch*num_it / (time_ms / 1000.0) / (1000.0*1000.0*1000.0*1000.0);

    std::cerr << "TEST MMQ " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time_ms / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;

    if (avg_err > 0.01 || std::isnan(avg_err)) {
        std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
        std::cerr << "Actual result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
        std::cerr << std::endl;
        std::cerr << "Expected result: " << std::endl << std::endl;
        ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

        if (split_k > 1) {
            float * split_k_buf = (float *) malloc(sizeof(float) * d_ne * split_k);
            ggml_vk_buffer_read(ctx->prealloc_split_k, 0, split_k_buf, sizeof(float) * d_ne * split_k);

            std::cerr << "d_buf0: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf1: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf2: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 2 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            std::cerr << "d_buf3: " << std::endl << std::endl;
            ggml_vk_print_matrix_area(split_k_buf + 3 * d_ne, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);

            free(split_k_buf);
        }
    }

    ggml_vk_destroy_buffer(qx_buf);
    ggml_vk_destroy_buffer(y_buf);
    ggml_vk_destroy_buffer(d_buf);

    free(x);
    free(qx);
    free(y);
    free(d);
    free(d_chk);
}
#endif

static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
#if defined(GGML_VULKAN_RUN_TESTS)
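    // This test battery runs once and ends in the GGML_ABORT below, so a
    // build with GGML_VULKAN_RUN_TESTS defined never reaches normal graph
    // execution.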
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_F32);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_IQ4_NL);

    ggml_vk_test_matmul<ggml_fp16_t, ggml_fp16_t>(ctx, 512, 512, 100, 32, 100, 1, 2);

    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 0);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 1);
    ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 1, 2);
    // ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 0);
    // ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 1);
    // ggml_vk_test_matmul<float, float>(ctx, 128, 512, 512, 2, 100, 4, 2);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_1);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_1);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_1);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_1);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q8_0);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q8_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q8_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q8_0);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q8_0);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q2_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q2_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q2_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q2_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q2_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q3_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q3_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q3_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q3_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q3_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q6_K);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q6_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q6_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q6_K);
    // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q6_K);

    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_IQ4_NL);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_IQ4_NL);
    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_IQ4_NL);

    std::cerr << std::endl;
    const std::vector<size_t> vals {
        8, 8, 8,
        100, 46, 576,
        623, 111, 128,
        100, 46, 558,
        512, 1, 256,
        128, 110, 622,
        511, 511, 127,
        511, 511, 7,
        511, 511, 17,
        49, 49, 128,
        128, 49, 49,
        4096, 49, 4096,
        11008, 49, 4096,
        4096, 49, 11008,
        32000, 49, 4096,
        512, 512, 128,
        128, 512, 512,
        4096, 512, 4096,
        11008, 512, 4096,
        4096, 512, 11008,
        32000, 512, 4096,
    };
    const size_t num_it = 1;
    for (size_t i = 0; i < vals.size(); i += 3) {
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1);
        ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2);
        // ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
        // ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
        // ggml_vk_test_matmul<ggml_fp16_t, float>(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
        std::cerr << std::endl;
    }

    GGML_ABORT("fatal error");
#endif
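
    // (Re)allocate the persistent scratch buffers when they are missing or
    // too small for the sizes computed during the dry run; they only ever
    // grow and are released in ggml_vk_cleanup().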
    if (ctx->prealloc_x == nullptr || (ctx->prealloc_size_x > 0 && ctx->prealloc_x->size < ctx->prealloc_size_x)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(x_size: " << ctx->prealloc_size_x << ")");
        // Resize buffer
        if (ctx->prealloc_x != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_x);
        }
        ctx->prealloc_x = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_x);
    }
    if (ctx->prealloc_y == nullptr || (ctx->prealloc_size_y > 0 && ctx->prealloc_y->size < ctx->prealloc_size_y)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(y_size: " << ctx->prealloc_size_y << ")");
        // Resize buffer
        if (ctx->prealloc_y != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_y);
        }
        ctx->prealloc_y = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_y);
    }
    if (ctx->prealloc_split_k == nullptr || (ctx->prealloc_size_split_k > 0 && ctx->prealloc_split_k->size < ctx->prealloc_size_split_k)) {
        VK_LOG_MEMORY("ggml_vk_preallocate_buffers(split_k_size: " << ctx->prealloc_size_split_k << ")");
        // Resize buffer
        if (ctx->prealloc_split_k != nullptr) {
            ggml_vk_destroy_buffer(ctx->prealloc_split_k);
        }
        ctx->prealloc_split_k = ggml_vk_create_buffer_device(ctx->device, ctx->prealloc_size_split_k);
    }
}
static bool ggml_vk_compute_forward(ggml_backend_vk_context* ctx, ggml_tensor* tensor, int tensor_idx, bool use_fence);

// Returns true if node has enqueued work into the queue, false otherwise
// If submit is true, all operations queued so far are submitted to Vulkan, overlapping command buffer creation with GPU execution.
static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * node, int node_idx, ggml_tensor *node_begin, int node_idx_begin, bool dryrun, bool last_node, bool submit){
    if (ggml_is_empty(node) || !node->buffer) {
        return false;
    }

    VK_LOG_DEBUG("ggml_vk_build_graph(" << node << ", " << ggml_op_name(node->op) << ")");
    ctx->semaphore_idx = 0;
    const ggml_tensor * src0 = node->src[0];
    const ggml_tensor * src1 = node->src[1];
    const ggml_tensor * src2 = node->src[2];

    switch (node->op) {
    // Return on empty ops to avoid generating a compute_ctx and setting exit_tensor
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NONE:
        return false;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_REPEAT:
    case GGML_OP_GET_ROWS:
    case GGML_OP_ADD:
    case GGML_OP_ACC:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_NORM:
    case GGML_OP_GROUP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ROPE:
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_IM2COL:
    case GGML_OP_TIMESTEP_EMBEDDING:
    case GGML_OP_LEAKY_RELU:
        break;
    default:
        std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(node->op) << std::endl;
        GGML_ABORT("fatal error");
        return false;
    }
    vk_context compute_ctx;

    if (!dryrun) {
        if (ctx->compute_ctx.expired()) {
            compute_ctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
            ctx->compute_ctx = compute_ctx;
            ggml_vk_ctx_begin(ctx->device, compute_ctx);
        } else {
            compute_ctx = ctx->compute_ctx.lock();
        }
    }

    switch (node->op) {
    case GGML_OP_REPEAT:
        ggml_vk_repeat(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_ACC:
        ggml_vk_acc(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_GET_ROWS:
        ggml_vk_get_rows(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_ADD:
        ggml_vk_add(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_MUL:
        ggml_vk_mul(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_DIV:
        ggml_vk_div(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_CONCAT:
        ggml_vk_concat(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_UPSCALE:
        ggml_vk_upscale(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SCALE:
        ggml_vk_scale(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SQR:
        ggml_vk_sqr(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SIN:
        ggml_vk_sin(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_COS:
        ggml_vk_cos(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_CLAMP:
        ggml_vk_clamp(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_PAD:
        ggml_vk_pad(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
        ggml_vk_cpy(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_NORM:
        ggml_vk_norm(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_GROUP_NORM:
        ggml_vk_group_norm(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_RMS_NORM:
        ggml_vk_rms_norm(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(node)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
            ggml_vk_unary(ctx, compute_ctx, src0, node, dryrun);
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_DIAG_MASK_INF:
        ggml_vk_diag_mask_inf(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SOFT_MAX:
        ggml_vk_soft_max(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_ROPE:
        ggml_vk_rope(ctx, compute_ctx, src0, src1, src2, node, dryrun);
        break;
    case GGML_OP_ARGSORT:
        ggml_vk_argsort(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_SUM_ROWS:
        ggml_vk_sum_rows(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_IM2COL:
        ggml_vk_im2col(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_TIMESTEP_EMBEDDING:
        ggml_vk_timestep_embedding(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_LEAKY_RELU:
        ggml_vk_leaky_relu(ctx, compute_ctx, src0, node, dryrun);
        break;
    case GGML_OP_MUL_MAT:
        ggml_vk_mul_mat(ctx, compute_ctx, src0, src1, node, dryrun);
        break;
    case GGML_OP_MUL_MAT_ID:
        ggml_vk_mul_mat_id(ctx, compute_ctx, src0, src1, src2, node, dryrun);
        break;
    default:
        return false;
    }
    if (dryrun) {
        return false;
    }
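
    // Remember which context this node was recorded into, so that
    // ggml_vk_compute_forward can look it up and submit/wait on it later.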
    ctx->tensor_ctxs[node_idx] = compute_ctx;

#if defined(GGML_VULKAN_CHECK_RESULTS) || defined(GGML_VULKAN_PERF)
    // Force context reset on each node so that each tensor ends up in its own context
    // and can be run and compared to its CPU equivalent separately
    last_node = true;
#endif

    if (submit || last_node) {
        ggml_vk_ctx_end(compute_ctx);

        // TODO: it would probably be better to pass an exit_node flag to ggml_vk_compute_forward
        if (last_node) {
            compute_ctx->exit_tensor_idx = node_idx_begin;
        }
        else {
            compute_ctx->exit_tensor_idx = -1;
        }

        ctx->compute_ctx.reset();

        bool ok = ggml_vk_compute_forward(ctx, node_begin, node_idx_begin, false);
        if (!ok) {
            if (node->op == GGML_OP_UNARY) {
                std::cerr << __func__ << ": error: op not supported UNARY " << node->name << " (" << ggml_unary_op_name(static_cast<ggml_unary_op>(node->op_params[0])) << ")" << std::endl;
            }
            else {
                std::cerr << __func__ << ": error: op not supported " << node->name << " (" << ggml_op_name(node->op) << ")" << std::endl;
            }
        }
    }
    return true;
}
static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * tensor, int tensor_idx, bool use_fence = true){
    ggml_backend_buffer * buf = nullptr;

    switch (tensor->op) {
    case GGML_OP_ADD:
    case GGML_OP_ACC:
    case GGML_OP_GET_ROWS:
    case GGML_OP_MUL:
    case GGML_OP_DIV:
    case GGML_OP_CONCAT:
    case GGML_OP_UPSCALE:
    case GGML_OP_SCALE:
    case GGML_OP_SQR:
    case GGML_OP_SIN:
    case GGML_OP_COS:
    case GGML_OP_CLAMP:
    case GGML_OP_PAD:
    case GGML_OP_CPY:
    case GGML_OP_CONT:
    case GGML_OP_DUP:
    case GGML_OP_NORM:
    case GGML_OP_GROUP_NORM:
    case GGML_OP_RMS_NORM:
    case GGML_OP_DIAG_MASK_INF:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_ROPE:
    case GGML_OP_RESHAPE:
    case GGML_OP_VIEW:
    case GGML_OP_PERMUTE:
    case GGML_OP_TRANSPOSE:
    case GGML_OP_NONE:
    case GGML_OP_ARGSORT:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_IM2COL:
    case GGML_OP_TIMESTEP_EMBEDDING:
    case GGML_OP_LEAKY_RELU:
    case GGML_OP_REPEAT:
        buf = tensor->buffer;
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(tensor)) {
        case GGML_UNARY_OP_SILU:
        case GGML_UNARY_OP_GELU:
        case GGML_UNARY_OP_GELU_QUICK:
        case GGML_UNARY_OP_RELU:
        case GGML_UNARY_OP_TANH:
            buf = tensor->buffer;
            break;
        default:
            return false;
        }
        break;
    case GGML_OP_MUL_MAT:
    case GGML_OP_MUL_MAT_ID:
        buf = tensor->buffer;
        break;
    default:
        return false;
    }

    if (buf == nullptr) {
        return false;
    }

    VK_LOG_DEBUG("ggml_vk_compute_forward(" << tensor << ", name=" << tensor->name << ", op=" << ggml_op_name(tensor->op) << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << ", view_src=" << tensor->view_src << ", view_offs=" << tensor->view_offs << ")");

    vk_context subctx = ctx->tensor_ctxs[tensor_idx].lock();

    // always wait for the GPU work to be done for the last submit
    if (tensor_idx == subctx->exit_tensor_idx) {
        use_fence = true;
    }

    // Only run if ctx hasn't been submitted yet
    if (!subctx->seqs.empty()) {
#ifdef GGML_VULKAN_CHECK_RESULTS
        ggml_vk_check_results_0(tensor);
        use_fence = true;
#endif

        // Do staging buffer copies
        for (auto& cpy : subctx->in_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }

        ggml_vk_submit(subctx, use_fence ? ctx->fence : vk::Fence{});

        if (use_fence) {
            VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_compute_forward waitForFences");
            ctx->device->device.resetFences({ ctx->fence });
        }
#ifdef GGML_VULKAN_CHECK_RESULTS
        ggml_vk_check_results_1(tensor);
#endif
    }

    if (tensor_idx == subctx->exit_tensor_idx) {
        // Do staging buffer copies
        for (auto& cpy : subctx->out_memcpys) {
            memcpy(cpy.dst, cpy.src, cpy.n);
        }
        subctx->in_memcpys.clear();
        subctx->out_memcpys.clear();
    }

    return true;
}
// Clean up after graph processing is done
static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_graph_cleanup()");
    for (auto& buffer : ctx->gc.temp_buffers) {
        ggml_vk_pool_free(ctx, buffer);
    }
    ctx->gc.temp_buffers.clear();

    for (auto& dsr : ctx->device->pipeline_descriptor_set_requirements) {
        vk_pipeline_ref plr = ctx->device->pipelines[dsr.first];

        if (plr.expired()) {
            continue;
        }

        vk_pipeline pl = plr.lock();
        ggml_pipeline_cleanup(pl);
    }

    ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue);
    ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue);

    for (size_t i = 0; i < ctx->gc.semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.semaphores[i].s });
    }
    ctx->gc.semaphores.clear();

    for (size_t i = 0; i < ctx->gc.tl_semaphores.size(); i++) {
        ctx->device->device.destroySemaphore({ ctx->gc.tl_semaphores[i].s });
    }
    ctx->gc.tl_semaphores.clear();
    ctx->semaphore_idx = 0;
    ctx->event_idx = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.resetEvent(event);
    }

    ctx->tensor_ctxs.clear();
    ctx->gc.contexts.clear();
    ctx->device->pipeline_descriptor_set_requirements.clear();
}

// Clean up on backend free
static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) {
    VK_LOG_DEBUG("ggml_vk_cleanup(" << ctx->name << ")");
    ggml_vk_graph_cleanup(ctx);

    ggml_vk_destroy_buffer(ctx->prealloc_x);
    ggml_vk_destroy_buffer(ctx->prealloc_y);
    ggml_vk_destroy_buffer(ctx->prealloc_split_k);

    for (auto& buffer : ctx->buffer_pool) {
        ggml_vk_destroy_buffer(buffer);
    }

    ctx->prealloc_size_x = 0;
    ctx->prealloc_size_y = 0;
    ctx->prealloc_size_split_k = 0;

    for (auto& event : ctx->gc.events) {
        ctx->device->device.destroyEvent(event);
    }
    ctx->gc.events.clear();

    ctx->device->device.destroyFence(ctx->fence);
}
static int ggml_vk_get_device_count() {
    ggml_vk_instance_init();

    return vk_instance.device_indices.size();
}

static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
    ggml_vk_instance_init();

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

    vk::PhysicalDeviceProperties props;
    devices[device].getProperties(&props);

    snprintf(description, description_size, "%s", props.deviceName.data());
}

// backend interface

#define UNUSED GGML_UNUSED

// device backend

static const char * ggml_backend_vk_buffer_get_name(ggml_backend_buffer_t buffer) {
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;

    return ctx->name.c_str();
}
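
// Buffers are identified as Vulkan buffers purely by comparing the get_name
// function pointer, the usual ggml-backend idiom for tagging buffer types.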
static bool ggml_backend_buffer_is_vk(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name == ggml_backend_vk_buffer_get_name;
}

static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_free_buffer()");
    ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
    ggml_vk_destroy_buffer(ctx->dev_buffer);
    delete ctx;
}
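
// Device-local buffers have no host-visible address, so get_base returns a
// fixed dummy pointer; tensor "addresses" in these buffers are really
// offsets, recovered later via vk_tensor_offset() in set/get_tensor below.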
  5221. static void * ggml_backend_vk_buffer_get_base(ggml_backend_buffer_t buffer) {
  5222. return vk_ptr_base;
  5223. UNUSED(buffer);
  5224. }
  5225. static void ggml_backend_vk_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
  5226. VK_LOG_DEBUG("ggml_backend_vk_buffer_init_tensor(" << buffer << " (" << buffer->context << "), " << tensor << ")");
  5227. if (tensor->view_src != nullptr) {
  5228. GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
  5229. }
  5230. }
  5231. static void ggml_backend_vk_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
  5232. VK_LOG_DEBUG("ggml_backend_vk_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
  5233. ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  5234. vk_buffer buf = buf_ctx->dev_buffer;
  5235. ggml_vk_buffer_write(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
  5236. }
  5237. static void ggml_backend_vk_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
  5238. VK_LOG_DEBUG("ggml_backend_vk_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
  5239. ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  5240. vk_buffer buf = buf_ctx->dev_buffer;
  5241. ggml_vk_buffer_read(buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
  5242. }
  5243. static bool ggml_backend_vk_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
  5244. if (ggml_backend_buffer_is_vk(src->buffer)) {
  5245. ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
  5246. ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
  5247. vk_buffer src_buf = src_buf_ctx->dev_buffer;
  5248. vk_buffer dst_buf = dst_buf_ctx->dev_buffer;
  5249. ggml_vk_buffer_copy(dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));
  5250. return true;
  5251. }
  5252. return false;
  5253. UNUSED(buffer);
  5254. }
  5255. static void ggml_backend_vk_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
  5256. ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context;
  5257. ggml_vk_buffer_memset(ctx->dev_buffer, 0, value, buffer->size);
  5258. }
  5259. static ggml_backend_buffer_i ggml_backend_vk_buffer_interface = {
  5260. /* .get_name = */ ggml_backend_vk_buffer_get_name,
  5261. /* .free_buffer = */ ggml_backend_vk_buffer_free_buffer,
  5262. /* .get_base = */ ggml_backend_vk_buffer_get_base,
  5263. /* .init_tensor = */ ggml_backend_vk_buffer_init_tensor,
  5264. /* .memset_tensor = */ NULL,
  5265. /* .set_tensor = */ ggml_backend_vk_buffer_set_tensor,
  5266. /* .get_tensor = */ ggml_backend_vk_buffer_get_tensor,
  5267. /* .cpy_tensor = */ ggml_backend_vk_buffer_cpy_tensor,
  5268. /* .clear = */ ggml_backend_vk_buffer_clear,
  5269. /* .reset = */ NULL,
  5270. };
// vk buffer type
static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *)buft->context;

    return ctx->name.c_str();
}

static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_buffer_type_alloc_buffer(" << size << ")");
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;

    vk_buffer dev_buffer = nullptr;
    try {
        dev_buffer = ggml_vk_create_buffer_device(ctx->device, size);
    } catch (const vk::SystemError& e) {
        return nullptr;
    }

    ggml_backend_vk_buffer_context * bufctx = new ggml_backend_vk_buffer_context(ctx->device, std::move(dev_buffer), ctx->name);

    return ggml_backend_buffer_init(buft, ggml_backend_vk_buffer_interface, bufctx, size);
}

static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->device->properties.limits.minStorageBufferOffsetAlignment;
}

static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context;
    return ctx->device->max_memory_allocation_size;
}

static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
    return ggml_nbytes(tensor);

    UNUSED(buft);
}

ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num) {
    ggml_vk_instance_init();

    VK_LOG_DEBUG("ggml_backend_vk_buffer_type(" << dev_num << ")");

    vk_device dev = ggml_vk_get_device(dev_num);

    return &dev->buffer_type;
}
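// Each vk_device owns a single buffer_type instance, so repeated calls for
// the same device number return the same pointer.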
// host buffer type

static const char * ggml_backend_vk_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
    return GGML_VK_NAME "_Host";

    UNUSED(buft);
}

static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffer) {
    return GGML_VK_NAME "_Host";

    UNUSED(buffer);
}

static void ggml_backend_vk_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_free_buffer()");
    ggml_vk_host_free(vk_instance.devices[0], buffer->context);
}

static ggml_backend_buffer_t ggml_backend_vk_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    VK_LOG_MEMORY("ggml_backend_vk_host_buffer_type_alloc_buffer(" << size << ")");

    size += 32; // Behave like the CPU buffer type

    void * ptr = nullptr;
    try {
        ptr = ggml_vk_host_malloc(vk_instance.devices[0], size);
    } catch (vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Failed to allocate pinned memory." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        // fallback to cpu buffer
        return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.get_name = ggml_backend_vk_host_buffer_name;
    buffer->iface.free_buffer = ggml_backend_vk_host_buffer_free_buffer;

    return buffer;

    UNUSED(buft);
}

static size_t ggml_backend_vk_host_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return vk_instance.devices[0]->properties.limits.minMemoryMapAlignment;

    UNUSED(buft);
}

// Should be changed to return device-specific host buffer type
// but that probably requires changes in llama.cpp
ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_vk_buffer_type_host = {
        /* .iface    = */ {
            /* .get_name       = */ ggml_backend_vk_host_buffer_type_name,
            /* .alloc_buffer   = */ ggml_backend_vk_host_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_vk_host_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
            /* .is_host        = */ ggml_backend_cpu_buffer_type()->iface.is_host,
        },
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), 0),
        /* .context = */ nullptr,
    };

    // Make sure device 0 is initialized
    ggml_vk_instance_init();
    ggml_vk_get_device(0);

    return &ggml_backend_vk_buffer_type_host;
}
// backend

static const char * ggml_backend_vk_name(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return ctx->name.c_str();
}

static void ggml_backend_vk_free(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    VK_LOG_DEBUG("ggml_backend_vk_free(" << ctx->name << ")");

    ggml_vk_cleanup(ctx);

    delete ctx;
    delete backend;
}

static ggml_backend_buffer_type_t ggml_backend_vk_get_default_buffer_type(ggml_backend_t backend) {
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    return &ctx->device->buffer_type;
}
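// Async transfers share one lazily-created transfer context, stored as a
// weak_ptr on the backend context: the first async op after a synchronize
// creates and begins a fresh context, later ops record into the same one, and
// ggml_backend_vk_synchronize() finally submits it and resets the weak_ptr.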
static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_set_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

    vk_context transfer_ctx;

    if (ctx->transfer_ctx.expired()) {
        // Initialize new transfer context
        transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ctx->transfer_ctx = transfer_ctx;
        ggml_vk_ctx_begin(ctx->device, transfer_ctx);
    } else {
        transfer_ctx = ctx->transfer_ctx.lock();
    }

    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_write_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    VK_LOG_DEBUG("ggml_backend_vk_get_tensor_async(" << size << ")");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    GGML_ASSERT((tensor->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || tensor->buffer->buft == ggml_backend_vk_host_buffer_type()) && "unsupported buffer type");

    ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

    vk_context transfer_ctx;

    if (ctx->transfer_ctx.expired()) {
        // Initialize new transfer context
        transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
        ctx->transfer_ctx = transfer_ctx;
        ggml_vk_ctx_begin(ctx->device, transfer_ctx);
    } else {
        transfer_ctx = ctx->transfer_ctx.lock();
    }

    vk_buffer buf = buf_ctx->dev_buffer;

    ggml_vk_buffer_read_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size);
}

static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) {
    VK_LOG_DEBUG("ggml_backend_vk_cpy_tensor_async()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if ((dst->buffer->buft == ggml_backend_vk_get_default_buffer_type(backend) || dst->buffer->buft == ggml_backend_vk_host_buffer_type()) && ggml_backend_buffer_is_vk(src->buffer)) {
        ggml_backend_vk_buffer_context * src_buf_ctx = (ggml_backend_vk_buffer_context *)src->buffer->context;
        ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;

        vk_context transfer_ctx;

        if (ctx->transfer_ctx.expired()) {
            // Initialize new transfer context
            transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue);
            ctx->transfer_ctx = transfer_ctx;
            ggml_vk_ctx_begin(ctx->device, transfer_ctx);
        } else {
            transfer_ctx = ctx->transfer_ctx.lock();
        }

        vk_buffer src_buf = src_buf_ctx->dev_buffer;
        vk_buffer dst_buf = dst_buf_ctx->dev_buffer;

        ggml_vk_buffer_copy_async(transfer_ctx, dst_buf, vk_tensor_offset(dst) + dst->view_offs, src_buf, vk_tensor_offset(src) + src->view_offs, ggml_nbytes(src));
        return true;
    }

    return false;
}
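// Flushing the shared transfer context happens in three steps: host-side
// staging copies (in_memcpys) must land before the command buffer is
// submitted, the fence wait guarantees the device has finished, and the
// read-back copies out of staging memory (out_memcpys) run last.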
static void ggml_backend_vk_synchronize(ggml_backend_t backend) {
    VK_LOG_DEBUG("ggml_backend_vk_synchronize()");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;
    if (ctx->transfer_ctx.expired()) {
        return;
    }

    vk_context transfer_ctx = ctx->transfer_ctx.lock();

    ggml_vk_ctx_end(transfer_ctx);

    for (auto& cpy : transfer_ctx->in_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }

    ggml_vk_submit(transfer_ctx, ctx->fence);
    VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_backend_vk_synchronize waitForFences");
    ctx->device->device.resetFences({ ctx->fence });

    for (auto& cpy : transfer_ctx->out_memcpys) {
        memcpy(cpy.dst, cpy.src, cpy.n);
    }

    ctx->transfer_ctx.reset();
}

static bool ggml_vk_is_empty(ggml_tensor * node) {
    return ggml_is_empty(node) || node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE;
}
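// Graph execution makes two passes over the nodes: a first dry-run pass that
// only collects buffer and descriptor set requirements, so everything can be
// preallocated up front, and a second pass that records the actual command
// buffers, submitting a batch roughly every submit_count nodes so CPU-side
// command recording overlaps with GPU execution.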
static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)");
    ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context;

    for (int i = 0; i < cgraph->n_nodes; i++) {
        ggml_vk_build_graph(ctx, cgraph->nodes[i], i, nullptr, 0, true, false, false);
    }
    ggml_vk_preallocate_buffers(ctx);
    ggml_pipeline_allocate_descriptor_sets(ctx->device);

    int last_node = cgraph->n_nodes - 1;

    // If the last op in the cgraph isn't backend GPU, the command buffer doesn't get closed properly
    while (last_node > 0 && ggml_vk_is_empty(cgraph->nodes[last_node])) {
        last_node -= 1;
    }

    // Reserve tensor context space for all nodes
    ctx->tensor_ctxs.resize(cgraph->n_nodes);

    bool first_node_in_batch = true; // true if next node will be first node in a batch
    int submit_node_idx = 0; // index of the first node in a batch

    // Submit work every submit_count nodes to overlap CPU cmdbuffer generation with GPU execution
    constexpr int submit_count = 100;
    int submitted_nodes = 0;
    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (first_node_in_batch) {
            submit_node_idx = i;
        }

        bool submit = (submitted_nodes >= submit_count) || (i == last_node);

        bool enqueued = ggml_vk_build_graph(ctx, cgraph->nodes[i], i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i == last_node, submit);

        if (enqueued) {
            ++submitted_nodes;

#ifndef GGML_VULKAN_CHECK_RESULTS
            if (first_node_in_batch) {
                first_node_in_batch = false;
            }
#endif
        }

        if (submit) {
            first_node_in_batch = true;
            submitted_nodes = 0;
        }
    }

#ifdef GGML_VULKAN_PERF
    ctx->device->perf_logger->print_timings();
#endif

    ggml_vk_graph_cleanup(ctx);

    return GGML_STATUS_SUCCESS;

    UNUSED(backend);
}
// TODO: enable async and synchronize
static ggml_backend_i ggml_backend_vk_interface = {
    /* .get_name                = */ ggml_backend_vk_name,
    /* .free                    = */ ggml_backend_vk_free,
    /* .get_default_buffer_type = */ ggml_backend_vk_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL, // ggml_backend_vk_set_tensor_async,
    /* .get_tensor_async        = */ NULL, // ggml_backend_vk_get_tensor_async,
    /* .cpy_tensor_async        = */ NULL, // ggml_backend_vk_cpy_tensor_async,
    /* .synchronize             = */ NULL, // ggml_backend_vk_synchronize,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_update       = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_vk_graph_compute,
    /* .supports_op             = */ NULL,
    /* .supports_buft           = */ NULL,
    /* .offload_op              = */ NULL,
    /* .event_record            = */ NULL,
    /* .event_wait              = */ NULL,
};

static ggml_guid_t ggml_backend_vk_guid() {
    static ggml_guid guid = { 0xb8, 0xf7, 0x4f, 0x86, 0x40, 0x3c, 0xe1, 0x02, 0x91, 0xc8, 0xdd, 0xe9, 0x02, 0x3f, 0xc0, 0x2b };
    return &guid;
}
ggml_backend_t ggml_backend_vk_init(size_t dev_num) {
    VK_LOG_DEBUG("ggml_backend_vk_init(" << dev_num << ")");

    ggml_backend_vk_context * ctx = new ggml_backend_vk_context;
    ggml_vk_init(ctx, dev_num);

    ggml_backend_t vk_backend = new ggml_backend {
        /* .guid      = */ ggml_backend_vk_guid(),
        /* .interface = */ ggml_backend_vk_interface,
        /* .device    = */ ggml_backend_reg_dev_get(ggml_backend_vk_reg(), dev_num),
        /* .context   = */ ctx,
    };

    return vk_backend;
}
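// Typical application-side usage, as an illustrative sketch (assumes a graph
// `gf` and its ggml_context `ctx` built elsewhere; error handling omitted):
//
//     ggml_backend_t backend = ggml_backend_vk_init(0); // device 0
//     ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
//     /* ... upload weights/inputs with ggml_backend_tensor_set() ... */
//     ggml_backend_graph_compute(backend, gf);
//     ggml_backend_buffer_free(buf);
//     ggml_backend_free(backend);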
bool ggml_backend_is_vk(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_vk_guid());
}

int ggml_backend_vk_get_device_count() {
    return ggml_vk_get_device_count();
}

void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size) {
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());
    int dev_idx = vk_instance.device_indices[device];
    ggml_vk_get_device_description(dev_idx, description, description_size);
}

void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total) {
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());

    vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];

    vk::PhysicalDeviceMemoryProperties memprops = vkdev.getMemoryProperties();

    for (const vk::MemoryHeap& heap : memprops.memoryHeaps) {
        if (heap.flags & vk::MemoryHeapFlagBits::eDeviceLocal) {
            *total = heap.size;
            *free = heap.size;
            break;
        }
    }
}
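// Note: core Vulkan has no query for currently available memory, so `free` is
// reported as the full size of the first device-local heap; an accurate value
// would require an extension such as VK_EXT_memory_budget.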
//////////////////////////

struct ggml_backend_vk_device_context {
    int device;
    std::string name;
    std::string description;
};

static const char * ggml_backend_vk_device_get_name(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ctx->name.c_str();
}

static const char * ggml_backend_vk_device_get_description(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ctx->description.c_str();
}

static void ggml_backend_vk_device_get_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)device->context;
    ggml_backend_vk_get_device_memory(ctx->device, free, total);
}

static ggml_backend_buffer_type_t ggml_backend_vk_device_get_buffer_type(ggml_backend_dev_t dev) {
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ggml_backend_vk_buffer_type(ctx->device);
}

static ggml_backend_buffer_type_t ggml_backend_vk_device_get_host_buffer_type(ggml_backend_dev_t dev) {
    UNUSED(dev);
    return ggml_backend_vk_host_buffer_type();
}

static enum ggml_backend_dev_type ggml_backend_vk_device_get_type(ggml_backend_dev_t dev) {
    UNUSED(dev);
    return GGML_BACKEND_DEVICE_TYPE_GPU_FULL;
}

static void ggml_backend_vk_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
    props->name        = ggml_backend_vk_device_get_name(dev);
    props->description = ggml_backend_vk_device_get_description(dev);
    props->type        = ggml_backend_vk_device_get_type(dev);
    ggml_backend_vk_device_get_memory(dev, &props->memory_free, &props->memory_total);
    props->caps = {
        /* async       */ false,
        /* host_buffer */ true,
        /* events      */ false,
    };
}

static ggml_backend_t ggml_backend_vk_device_init(ggml_backend_dev_t dev, const char * params) {
    UNUSED(params);
    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    return ggml_backend_vk_init(ctx->device);
}
static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_TANH:
                    return ggml_is_contiguous(op->src[0]);
                default:
                    return false;
            }
            break;
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_Q2_K:
                    case GGML_TYPE_Q3_K:
                    case GGML_TYPE_Q4_K:
                    case GGML_TYPE_Q5_K:
                    case GGML_TYPE_Q6_K:
                    case GGML_TYPE_IQ4_NL:
                        break;
                    default:
                        return false;
                }
                struct ggml_tensor * a;
                struct ggml_tensor * b;
                if (op->op == GGML_OP_MUL_MAT) {
                    a = op->src[0];
                    b = op->src[1];
                } else {
                    a = op->src[2];
                    b = op->src[1];
                }
                if (a->ne[3] != b->ne[3]) {
                    return false;
                }
                return true;
            } break;
        case GGML_OP_GET_ROWS:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1:
                    case GGML_TYPE_Q5_0:
                    case GGML_TYPE_Q5_1:
                    case GGML_TYPE_Q8_0:
                    case GGML_TYPE_IQ4_NL:
                        return true;
                    default:
                        return false;
                }
            } break;
        case GGML_OP_CONT:
        case GGML_OP_CPY:
        case GGML_OP_DUP:
            {
                ggml_type src0_type = op->src[0]->type;
                ggml_type src1_type = op->src[1] != nullptr ? op->src[1]->type : src0_type;
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
                    return true;
                }
                if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
                    return true;
                }
                return false;
            } break;
        case GGML_OP_REPEAT:
            return ggml_type_size(op->type) == sizeof(float) && ggml_type_size(op->src[0]->type) == sizeof(float);
        case GGML_OP_ROPE:
            return ggml_is_contiguous(op->src[0]);
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_NORM:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_RMS_NORM:
        case GGML_OP_ADD:
        case GGML_OP_ACC:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_CONCAT:
        case GGML_OP_UPSCALE:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_SIN:
        case GGML_OP_COS:
        case GGML_OP_CLAMP:
        case GGML_OP_PAD:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_ARGSORT:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_IM2COL:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_LEAKY_RELU:
            return true;
        default:
            return false;
    }

    UNUSED(dev);
}
static bool ggml_backend_vk_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    if (buft->iface.get_name != ggml_backend_vk_buffer_type_name) {
        return false;
    }

    ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
    ggml_backend_vk_buffer_type_context * buft_ctx = (ggml_backend_vk_buffer_type_context *)buft->context;

    return buft_ctx->device->idx == ctx->device;
}

static bool ggml_backend_vk_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    const int min_batch_size = 32;

    return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
           (op->ne[2] >= min_batch_size && op->op == GGML_OP_MUL_MAT_ID);

    UNUSED(dev);
}
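// Offloading an op whose weights live in host memory only pays off once the
// batch is large enough to amortize the transfer: at least min_batch_size
// rows along ne[1], or along ne[2] for MUL_MAT_ID, where that is the batch
// dimension.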
static const struct ggml_backend_device_i ggml_backend_vk_device_i = {
    /* .get_name             = */ ggml_backend_vk_device_get_name,
    /* .get_description      = */ ggml_backend_vk_device_get_description,
    /* .get_memory           = */ ggml_backend_vk_device_get_memory,
    /* .get_type             = */ ggml_backend_vk_device_get_type,
    /* .get_props            = */ ggml_backend_vk_device_get_props,
    /* .init_backend         = */ ggml_backend_vk_device_init,
    /* .get_buffer_type      = */ ggml_backend_vk_device_get_buffer_type,
    /* .get_host_buffer_type = */ ggml_backend_vk_device_get_host_buffer_type,
    /* .buffer_from_host_ptr = */ NULL,
    /* .supports_op          = */ ggml_backend_vk_device_supports_op,
    /* .supports_buft        = */ ggml_backend_vk_device_supports_buft,
    /* .offload_op           = */ ggml_backend_vk_device_offload_op,
    /* .event_new            = */ NULL,
    /* .event_free           = */ NULL,
    /* .event_synchronize    = */ NULL,
};

static const char * ggml_backend_vk_reg_get_name(ggml_backend_reg_t reg) {
    UNUSED(reg);
    return GGML_VK_NAME;
}

static size_t ggml_backend_vk_reg_get_device_count(ggml_backend_reg_t reg) {
    UNUSED(reg);
    return ggml_backend_vk_get_device_count();
}
static ggml_backend_dev_t ggml_backend_vk_reg_get_device(ggml_backend_reg_t reg, size_t device) {
    // Device objects are created once, under a mutex, on first use and kept
    // alive for the lifetime of the process.
    static std::vector<ggml_backend_dev_t> devices;

    static bool initialized = false;

    {
        static std::mutex mutex;
        std::lock_guard<std::mutex> lock(mutex);
        if (!initialized) {
            for (int i = 0; i < ggml_backend_vk_get_device_count(); i++) {
                ggml_backend_vk_device_context * ctx = new ggml_backend_vk_device_context;
                char desc[256];
                ggml_backend_vk_get_device_description(i, desc, sizeof(desc));
                ctx->device = i;
                ctx->name = GGML_VK_NAME + std::to_string(i);
                ctx->description = desc;
                devices.push_back(new ggml_backend_device {
                    /* .iface   = */ ggml_backend_vk_device_i,
                    /* .reg     = */ reg,
                    /* .context = */ ctx,
                });
            }
            initialized = true;
        }
    }

    GGML_ASSERT(device < devices.size());
    return devices[device];
}

static const struct ggml_backend_reg_i ggml_backend_vk_reg_i = {
    /* .get_name         = */ ggml_backend_vk_reg_get_name,
    /* .get_device_count = */ ggml_backend_vk_reg_get_device_count,
    /* .get_device       = */ ggml_backend_vk_reg_get_device,
    /* .get_proc_address = */ NULL,
};

ggml_backend_reg_t ggml_backend_vk_reg() {
    static ggml_backend_reg reg = {
        /* .iface   = */ ggml_backend_vk_reg_i,
        /* .context = */ nullptr,
    };

    return &reg;
}
// Extension availability
static bool ggml_vk_instance_validation_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
#ifdef GGML_VULKAN_VALIDATE
    // Check for the validation features extension used by the validation layers
    for (const auto& properties : instance_extensions) {
        if (strcmp("VK_EXT_validation_features", properties.extensionName) == 0) {
            return true;
        }
    }
    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_EXT_validation_features not found." << std::endl;
#endif
    return false;

    UNUSED(instance_extensions);
}

static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector<vk::ExtensionProperties>& instance_extensions) {
#ifdef __APPLE__
    // Check for the portability enumeration extension for MoltenVK support
    for (const auto& properties : instance_extensions) {
        if (strcmp("VK_KHR_portability_enumeration", properties.extensionName) == 0) {
            return true;
        }
    }
    std::cerr << "ggml_vulkan: WARNING: Instance extension VK_KHR_portability_enumeration not found." << std::endl;
#endif
    return false;

    UNUSED(instance_extensions);
}
// checks

#ifdef GGML_VULKAN_CHECK_RESULTS
static void ggml_vk_print_graph_origin(const ggml_tensor * tensor, std::vector<const ggml_tensor *>& done, int level = 0) {
    if (std::find(done.begin(), done.end(), tensor) != done.end() || level > 10) {
        return;
    }
    for (int j = 0; j < level; j++) {
        std::cerr << " ";
    }
    std::cerr << ggml_op_name(tensor->op) << " gpu=" << (tensor->extra != nullptr) << std::endl;

    done.push_back(tensor);

    for (int i = 0; i < GGML_MAX_SRC; i++) {
        if (tensor->src[i] != nullptr) {
            ggml_vk_print_graph_origin(tensor->src[i], done, level + 1);
        }
    }
}
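// Prints a 10x10 window of values centered at (i0, i1) in the (i2, i3) plane,
// for F32, F16 and I32 tensors; out-of-range cells are left blank.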
static void ggml_vk_print_tensor_area(const ggml_tensor * tensor, const void * data, int i0, int i1, int i2, int i3) {
    if (tensor->type != GGML_TYPE_F32 && tensor->type != GGML_TYPE_F16 && tensor->type != GGML_TYPE_I32) {
        return;
    }
    i0 = std::max(i0, 5);
    i1 = std::max(i1, 5);
    i2 = std::max(i2, 0);
    i3 = std::max(i3, 0);
    fprintf(stderr, "         ");
    for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
        fprintf(stderr, "%7d ", idx1);
    }
    fprintf(stderr, "\n");
    for (int idx0 = i0 - 5; idx0 < i0 + 5; idx0++) {
        fprintf(stderr, "%7d: ", idx0);
        for (int idx1 = i1 - 5; idx1 < i1 + 5; idx1++) {
            if (idx0 >= 0 && idx0 < tensor->ne[0] && idx1 >= 0 && idx1 < tensor->ne[1] && i2 >= 0 && i2 < tensor->ne[2] && i3 >= 0 && i3 < tensor->ne[3]) {
                float val;
                if (tensor->type == GGML_TYPE_F32) {
                    val = *(const float *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else if (tensor->type == GGML_TYPE_F16) {
                    val = ggml_fp16_to_fp32(*(const ggml_fp16_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]));
                } else if (tensor->type == GGML_TYPE_I32) {
                    val = *(const int32_t *) ((const char *) data + i3*tensor->nb[3] + i2*tensor->nb[2] + idx1*tensor->nb[1] + idx0*tensor->nb[0]);
                } else {
                    GGML_ABORT("fatal error");
                }
                fprintf(stderr, "% 7.2f ", val);
            } else {
                fprintf(stderr, "        ");
            }
        }
        fprintf(stderr, "\n");
    }
}
static void ggml_vk_print_tensor(const ggml_tensor * tensor, const char * name) {
    void * tensor_data = tensor->data;

    const bool is_gpu = tensor->buffer != nullptr && ggml_backend_buffer_is_vk(tensor->buffer);
    if (is_gpu) {
        const size_t tensor_size = ggml_nbytes(tensor);
        tensor_data = malloc(tensor_size);

        ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;

        vk_buffer buffer_gpu = buf_ctx->dev_buffer;
        ggml_vk_buffer_read(buffer_gpu, vk_tensor_offset(tensor) + tensor->view_offs, tensor_data, tensor_size);
    }

    std::cerr << "TENSOR CHECK " << name << " (" << tensor->name << "): " << ggml_op_name(tensor->op) << std::endl;
    std::cerr << "tensor=" << tensor << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << std::endl;
    if (tensor->src[0] != nullptr) {
        std::cerr << "tensor->src[0]=" << tensor->src[0] << " name=" << tensor->src[0]->name << " op=" << ggml_op_name(tensor->src[0]->op) << " type=" << ggml_type_name(tensor->src[0]->type) << " ne0=" << tensor->src[0]->ne[0] << " nb0=" << tensor->src[0]->nb[0] << " ne1=" << tensor->src[0]->ne[1] << " nb1=" << tensor->src[0]->nb[1] << " ne2=" << tensor->src[0]->ne[2] << " nb2=" << tensor->src[0]->nb[2] << " ne3=" << tensor->src[0]->ne[3] << " nb3=" << tensor->src[0]->nb[3] << std::endl;
    }
    if (tensor->src[1] != nullptr) {
        std::cerr << "tensor->src[1]=" << tensor->src[1] << " name=" << tensor->src[1]->name << " op=" << ggml_op_name(tensor->src[1]->op) << " type=" << ggml_type_name(tensor->src[1]->type) << " ne0=" << tensor->src[1]->ne[0] << " nb0=" << tensor->src[1]->nb[0] << " ne1=" << tensor->src[1]->ne[1] << " nb1=" << tensor->src[1]->nb[1] << " ne2=" << tensor->src[1]->ne[2] << " nb2=" << tensor->src[1]->nb[2] << " ne3=" << tensor->src[1]->ne[3] << " nb3=" << tensor->src[1]->nb[3] << std::endl;
    }
    std::cerr << std::endl << "Result:" << std::endl;
    ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
    std::cerr << std::endl;
    std::vector<const ggml_tensor *> done;
    ggml_vk_print_graph_origin(tensor, done);

    if (is_gpu) {
        free(tensor_data);
    }
}
void * comp_result;
size_t comp_size;
size_t comp_nb[GGML_MAX_DIMS];
size_t check_counter = 0;
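// The check harness runs in pairs: ggml_vk_check_results_0() is invoked
// before a node executes on the GPU, clones the node and its sources into a
// plain CPU ggml context, computes a reference result and stashes it in
// comp_result/comp_nb; ggml_vk_check_results_1() runs after GPU execution,
// reads the tensor back and compares it element-wise against that reference.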
static void ggml_vk_check_results_0(ggml_tensor * tensor) {
    if (tensor->op == GGML_OP_TRANSPOSE) {
        return;
    }

    check_counter++;
    if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
        return;
    }

    VK_LOG_DEBUG("ggml_vk_check_results_0(" << tensor->name << ")");

    ggml_tensor * src0 = tensor->src[0];
    ggml_tensor * src1 = tensor->src[1];
    ggml_tensor * src2 = tensor->src[2];

    struct ggml_init_params iparams = {
        /*.mem_size   =*/ 2ul*1024ul*1024ul*1024ul,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };

    struct ggml_context * ggml_ctx = ggml_init(iparams);

    struct ggml_tensor * src0_clone = nullptr;
    struct ggml_tensor * src1_clone = nullptr;
    struct ggml_tensor * src2_clone = nullptr;
    struct ggml_tensor * tensor_clone = nullptr;

    size_t src0_size;
    size_t src1_size;
    size_t src2_size;

    void * src0_buffer = nullptr;
    void * src1_buffer = nullptr;
    void * src2_buffer = nullptr;

    if (src0 != nullptr) {
        src0_clone = ggml_dup_tensor(ggml_ctx, src0);

        src0_size = ggml_nbytes(src0);

        src0_buffer = malloc(src0_size);
        src0_clone->data = src0_buffer;
        if (ggml_backend_buffer_is_host(src0->buffer)) {
            memcpy(src0_clone->data, src0->data, src0_size);
            memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src0->buffer)) {
            ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
            vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
            uint64_t offset = vk_tensor_offset(src0) + src0->view_offs;
            if (!ggml_is_contiguous(src0) && ggml_vk_dim01_contiguous(src0)) {
                for (int i3 = 0; i3 < src0->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src0->ne[2]; i2++) {
                        const int idx = i3*src0->ne[2] + i2;
                        ggml_vk_buffer_read(buffer_gpu, offset + idx * src0->nb[2], ((char *)src0_clone->data + idx * src0_clone->nb[2]), src0->ne[1] * src0->nb[1]);
                    }
                }

                src0_clone->nb[0] = src0->nb[0];
                src0_clone->nb[1] = src0->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src0_clone->nb[i] = src0_clone->nb[i - 1]*src0_clone->ne[i - 1];
                }
            } else {
                if (offset + src0_size >= buffer_gpu->size) {
                    src0_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(buffer_gpu, offset, src0_clone->data, src0_size);
                memcpy(src0_clone->nb, src0->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ABORT("fatal error");
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(src0, "src0");
        }
    }
    if (src1 != nullptr) {
        src1_clone = ggml_dup_tensor(ggml_ctx, src1);

        src1_size = ggml_nbytes(src1);

        src1_buffer = malloc(src1_size);
        src1_clone->data = src1_buffer;
        if (ggml_backend_buffer_is_host(src1->buffer)) {
            memcpy(src1_clone->data, src1->data, src1_size);
            memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src1->buffer)) {
            ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
            vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
            uint64_t offset = vk_tensor_offset(src1) + src1->view_offs;
            if (!ggml_is_contiguous(src1) && ggml_vk_dim01_contiguous(src1)) {
                for (int i3 = 0; i3 < src1->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src1->ne[2]; i2++) {
                        const int idx = i3*src1->ne[2] + i2;
                        ggml_vk_buffer_read(buffer_gpu, offset + idx * src1->nb[2], ((char *)src1_clone->data + idx * src1_clone->nb[2]), src1->ne[1] * src1->nb[1]);
                    }
                }

                src1_clone->nb[0] = src1->nb[0];
                src1_clone->nb[1] = src1->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src1_clone->nb[i] = src1_clone->nb[i - 1]*src1_clone->ne[i - 1];
                }
            } else {
                if (offset + src1_size >= buffer_gpu->size) {
                    src1_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(buffer_gpu, offset, src1_clone->data, src1_size);
                memcpy(src1_clone->nb, src1->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ABORT("fatal error");
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(src1, "src1");
        }
    }
    if (src2 != nullptr) {
        src2_clone = ggml_dup_tensor(ggml_ctx, src2);

        src2_size = ggml_nbytes(src2);

        src2_buffer = malloc(src2_size);
        src2_clone->data = src2_buffer;
        if (ggml_backend_buffer_is_host(src2->buffer)) {
            memcpy(src2_clone->data, src2->data, src2_size);
            memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
        } else if (ggml_backend_buffer_is_vk(src2->buffer)) {
            ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src2->buffer->context;
            vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
            uint64_t offset = vk_tensor_offset(src2) + src2->view_offs;
            if (!ggml_is_contiguous(src2) && ggml_vk_dim01_contiguous(src2)) {
                for (int i3 = 0; i3 < src2->ne[3]; i3++) {
                    for (int i2 = 0; i2 < src2->ne[2]; i2++) {
                        const int idx = i3*src2->ne[2] + i2;
                        ggml_vk_buffer_read(buffer_gpu, offset + idx * src2->nb[2], ((char *)src2_clone->data + idx * src2_clone->nb[2]), src2->ne[1] * src2->nb[1]);
                    }
                }

                src2_clone->nb[0] = src2->nb[0];
                src2_clone->nb[1] = src2->nb[1];
                for (int i = 2; i < GGML_MAX_DIMS; i++) {
                    src2_clone->nb[i] = src2_clone->nb[i - 1]*src2_clone->ne[i - 1];
                }
            } else {
                if (offset + src2_size >= buffer_gpu->size) {
                    src2_size = buffer_gpu->size - offset;
                }
                ggml_vk_buffer_read(buffer_gpu, offset, src2_clone->data, src2_size);
                memcpy(src2_clone->nb, src2->nb, sizeof(size_t) * GGML_MAX_DIMS);
            }
        } else {
            GGML_ABORT("fatal error");
        }

        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
            ggml_vk_print_tensor(src2, "src2");
        }
    }

    if (tensor->op == GGML_OP_MUL_MAT) {
        tensor_clone = ggml_mul_mat(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_MUL_MAT_ID) {
        tensor_clone = ggml_mul_mat_id(ggml_ctx, src0_clone, src1_clone, src2_clone);
    } else if (tensor->op == GGML_OP_MUL) {
        tensor_clone = ggml_mul(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_DIV) {
        tensor_clone = ggml_div(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_CONCAT) {
        tensor_clone = ggml_concat(ggml_ctx, src0_clone, src1_clone, *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_UPSCALE) {
        tensor_clone = ggml_upscale_ext(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    } else if (tensor->op == GGML_OP_SCALE) {
        tensor_clone = ggml_scale(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0]);
    } else if (tensor->op == GGML_OP_SQR) {
        tensor_clone = ggml_sqr(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_SIN) {
        tensor_clone = ggml_sin(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_COS) {
        tensor_clone = ggml_cos(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_CLAMP) {
        tensor_clone = ggml_clamp(ggml_ctx, src0_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
    } else if (tensor->op == GGML_OP_PAD) {
        tensor_clone = ggml_pad(ggml_ctx, src0_clone, tensor->ne[0] - src0_clone->ne[0], tensor->ne[1] - src0_clone->ne[1], tensor->ne[2] - src0_clone->ne[2], tensor->ne[3] - src0_clone->ne[3]);
    } else if (tensor->op == GGML_OP_REPEAT) {
        tensor_clone = ggml_repeat(ggml_ctx, src0_clone, tensor);
    } else if (tensor->op == GGML_OP_ADD) {
        tensor_clone = ggml_add(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_ACC) {
        tensor_clone = ggml_acc(ggml_ctx, src0_clone, src1_clone, tensor->op_params[0], tensor->op_params[1], tensor->op_params[2], tensor->op_params[3]);
    } else if (tensor->op == GGML_OP_NORM) {
        tensor_clone = ggml_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
    } else if (tensor->op == GGML_OP_GROUP_NORM) {
        tensor_clone = ggml_group_norm(ggml_ctx, src0_clone, *(int *)tensor->op_params, ((float *)tensor->op_params)[1]);
    } else if (tensor->op == GGML_OP_RMS_NORM) {
        tensor_clone = ggml_rms_norm(ggml_ctx, src0_clone, *(float *)tensor->op_params);
    } else if (tensor->op == GGML_OP_SOFT_MAX) {
        if (src1 != nullptr) {
            tensor_clone = ggml_soft_max_ext(ggml_ctx, src0_clone, src1_clone, ((float *)tensor->op_params)[0], ((float *)tensor->op_params)[1]);
        } else {
            tensor_clone = ggml_soft_max(ggml_ctx, src0_clone);
        }
    } else if (tensor->op == GGML_OP_DIAG_MASK_INF) {
        tensor_clone = ggml_diag_mask_inf(ggml_ctx, src0_clone, *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_ROPE) {
        const int n_dims = ((int32_t *) tensor->op_params)[1];
        const int mode = ((int32_t *) tensor->op_params)[2];
        //const int n_ctx_ggml = ((int32_t *) tensor->op_params)[3];
        const int n_ctx_orig_ggml = ((int32_t *) tensor->op_params)[4];
        const float freq_base = ((float *) tensor->op_params)[5];
        const float freq_scale = ((float *) tensor->op_params)[6];
        const float ext_factor = ((float *) tensor->op_params)[7];
        const float attn_factor = ((float *) tensor->op_params)[8];
        const float beta_fast = ((float *) tensor->op_params)[9];
        const float beta_slow = ((float *) tensor->op_params)[10];
        tensor_clone = ggml_rope_ext(ggml_ctx, src0_clone, src1_clone, src2_clone, n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
    } else if (tensor->op == GGML_OP_UNARY) {
        switch (ggml_get_unary_op(tensor)) {
            case GGML_UNARY_OP_SILU:
                tensor_clone = ggml_silu(ggml_ctx, src0_clone);
                break;
            case GGML_UNARY_OP_GELU:
                tensor_clone = ggml_gelu(ggml_ctx, src0_clone);
                break;
            case GGML_UNARY_OP_GELU_QUICK:
                tensor_clone = ggml_gelu_quick(ggml_ctx, src0_clone);
                break;
            case GGML_UNARY_OP_RELU:
                tensor_clone = ggml_relu(ggml_ctx, src0_clone);
                break;
            case GGML_UNARY_OP_TANH:
                tensor_clone = ggml_tanh(ggml_ctx, src0_clone);
                break;
            default:
                std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
                GGML_ABORT("fatal error");
        }
    } else if (tensor->op == GGML_OP_CPY || tensor->op == GGML_OP_DUP) {
        if (src1 == nullptr) {
            tensor_clone = ggml_dup(ggml_ctx, src0_clone);
            tensor_clone->type = tensor->type;
        } else {
            tensor_clone = ggml_cpy(ggml_ctx, src0_clone, src1_clone);
        }
    } else if (tensor->op == GGML_OP_CONT) {
        tensor_clone = ggml_cont_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    } else if (tensor->op == GGML_OP_RESHAPE) {
        tensor_clone = ggml_reshape_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
    } else if (tensor->op == GGML_OP_VIEW) {
        tensor_clone = ggml_view_4d(ggml_ctx, src0_clone, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->nb[1], tensor->nb[2], tensor->nb[3], ((int32_t *) tensor->op_params)[0]);
    } else if (tensor->op == GGML_OP_PERMUTE) {
        int32_t * params = (int32_t *)tensor->op_params;
        tensor_clone = ggml_permute(ggml_ctx, src0_clone, params[0], params[1], params[2], params[3]);
    } else if (tensor->op == GGML_OP_TRANSPOSE) {
        tensor_clone = ggml_transpose(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_GET_ROWS) {
        tensor_clone = ggml_get_rows(ggml_ctx, src0_clone, src1_clone);
    } else if (tensor->op == GGML_OP_ARGSORT) {
        tensor_clone = ggml_argsort(ggml_ctx, src0_clone, (ggml_sort_order) *(int *)tensor->op_params);
    } else if (tensor->op == GGML_OP_SUM_ROWS) {
        tensor_clone = ggml_sum_rows(ggml_ctx, src0_clone);
    } else if (tensor->op == GGML_OP_IM2COL) {
        const int32_t s0 = tensor->op_params[0];
        const int32_t s1 = tensor->op_params[1];
        const int32_t p0 = tensor->op_params[2];
        const int32_t p1 = tensor->op_params[3];
        const int32_t d0 = tensor->op_params[4];
        const int32_t d1 = tensor->op_params[5];

        const bool is_2D = tensor->op_params[6] == 1;
        tensor_clone = ggml_im2col(ggml_ctx, src0_clone, src1_clone, s0, s1, p0, p1, d0, d1, is_2D, tensor->type);
    } else if (tensor->op == GGML_OP_TIMESTEP_EMBEDDING) {
        const int32_t dim = tensor->op_params[0];
        const int32_t max_period = tensor->op_params[1];
        tensor_clone = ggml_timestep_embedding(ggml_ctx, src0_clone, dim, max_period);
    } else if (tensor->op == GGML_OP_LEAKY_RELU) {
        const float * op_params = (const float *)tensor->op_params;
        tensor_clone = ggml_leaky_relu(ggml_ctx, src0_clone, op_params[0], false);
    } else {
        std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
        GGML_ABORT("fatal error");
    }

    ggml_cgraph * cgraph = ggml_new_graph(ggml_ctx);
    ggml_build_forward_expand(cgraph, tensor_clone);
    ggml_graph_compute_with_ctx(ggml_ctx, cgraph, 8);

    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        ggml_vk_print_tensor(tensor_clone, "tensor_clone");
    }

    comp_size = ggml_nbytes(tensor_clone);

    comp_result = malloc(comp_size);
    memcpy(comp_result, tensor_clone->data, comp_size);
    memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS);

    if (src0 != nullptr) {
        free(src0_buffer);
    }
    if (src1 != nullptr) {
        free(src1_buffer);
    }
    if (src2 != nullptr) {
        free(src2_buffer);
    }

    ggml_free(ggml_ctx);

    VK_LOG_DEBUG("END ggml_vk_check_results_0(" << tensor->name << ")");
}
  6202. static void ggml_vk_check_results_1(ggml_tensor * tensor) {
  6203. if (tensor->op == GGML_OP_TRANSPOSE) {
  6204. return;
  6205. }
  6206. if (!(vk_output_tensor > 0 && vk_output_tensor == check_counter) && check_counter <= vk_skip_checks) {
  6207. return;
  6208. }
  6209. VK_LOG_DEBUG("ggml_vk_check_results_1(" << tensor->name << ")");
  6210. ggml_tensor * src0 = tensor->src[0];
  6211. ggml_tensor * src1 = tensor->src[1];
  6212. ggml_tensor * src2 = tensor->src[2];
  6213. void * tensor_data = tensor->data;
  6214. if (ggml_backend_buffer_is_vk(tensor->buffer)) {
  6215. size_t tensor_size = ggml_nbytes(tensor);
  6216. tensor_data = malloc(tensor_size);
  6217. ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)tensor->buffer->context;
  6218. vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
  6219. uint64_t offset = vk_tensor_offset(tensor) + tensor->view_offs;
  6220. if (offset + tensor_size >= buffer_gpu->size) {
  6221. tensor_size = buffer_gpu->size - offset;
  6222. }
  6223. ggml_vk_buffer_read(buffer_gpu, offset, tensor_data, tensor_size);
  6224. }
  6225. float first_error_result = -1.0f;
  6226. float first_error_correct = -1.0f;
  6227. std::array<int, 4> first_error = { -1, -1, -1, -1 };
  6228. double avg_err = 0.0;
  6229. size_t counter = 0;
  6230. for (int i3 = 0; i3 < tensor->ne[3]; i3++) {
  6231. for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
  6232. for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
  6233. for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
  6234. const bool buffer_size_fit = i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0] < comp_size;
  6235. float correct = 0.0f;
  6236. float result = 0.0f;
  6237. if (buffer_size_fit) {
  6238. if (tensor->type == GGML_TYPE_F32) {
  6239. correct = *(float *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
  6240. result = *(float *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
  6241. } else if (tensor->type == GGML_TYPE_F16) {
  6242. correct = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]));
  6243. result = ggml_fp16_to_fp32(*(ggml_fp16_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]));
  6244. } else if (tensor->type == GGML_TYPE_I32) {
  6245. correct = *(int32_t *) ((char *) comp_result + i3*comp_nb[3] + i2*comp_nb[2] + i1*comp_nb[1] + i0*comp_nb[0]);
  6246. result = *(int32_t *) ((char *) tensor_data + i3*tensor->nb[3] + i2*tensor->nb[2] + i1*tensor->nb[1] + i0*tensor->nb[0]);
  6247. } else {
  6248. std::cerr << "Results check not implemented for type " << ggml_type_name(tensor->type) << std::endl;
  6249. }
  6250. } else {
  6251. std::cerr << "Missing debug code for type " << ggml_type_name(tensor->type) << std::endl;
  6252. GGML_ABORT("fatal error");
  6253. }
  6254. if ((std::isnan(correct) != std::isnan(result)) || (std::isinf(correct) != std::isinf(result)) || !buffer_size_fit) {
  6255. std::cerr << "ERROR: Invalid value in " << ggml_op_name(tensor->op) << " i3=" << i3 << " i2=" << i2 << " i1=" << i1 << " i0=" << i0 << " result=" << result << " correct=" << correct << " avg_err=" << (avg_err / counter) << std::endl;
  6256. std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
  6257. if (src0 != nullptr) {
  6258. std::cerr << "src0=" << src0 << " src0->name=" << src0->name << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
  6259. }
  6260. if (src1 != nullptr) {
  6261. std::cerr << "src1=" << src1 << " src1->name=" << src1->name << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
  6262. }
  6263. if (src2 != nullptr) {
  6264. std::cerr << "src2=" << src2 << " src2->name=" << src2->name << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
  6265. }
  6266. std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
  6267. std::cerr << std::endl << "Result:" << std::endl;
  6268. ggml_vk_print_tensor_area(tensor, tensor_data, i0, i1, i2, i3);
  6269. std::cerr << std::endl << "Correct:" << std::endl;
  6270. ggml_vk_print_tensor_area(tensor, comp_result, i0, i1, i2, i3);
  6271. std::cerr << std::endl;
  6272. std::vector<const ggml_tensor *> done;
  6273. ggml_vk_print_graph_origin(tensor, done);
  6274. GGML_ABORT("fatal error");
  6275. }
  6276. if (first_error[0] == -1 && std::fabs(correct - result) > 0.1f) {
  6277. first_error[0] = i0;
  6278. first_error[1] = i1;
  6279. first_error[2] = i2;
  6280. first_error[3] = i3;
  6281. first_error_result = result;
  6282. first_error_correct = correct;
  6283. }
  6284. // Special case, value is infinite, avoid NaN result in avg_err
  6285. // NaN also appears in results, if both are nan error is 0
  6286. if (!std::isinf(correct) && !std::isinf(result) && !std::isnan(correct) && !std::isnan(result)) {
  6287. avg_err += std::fabs(correct - result);
  6288. }
  6289. counter++;
  6290. }
  6291. }
  6292. }
  6293. }
  6294. avg_err /= counter;
    if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
        std::cerr << "TENSOR CHECK: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, 5, 5, 0, 0);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, 5, 5, 0, 0);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
    }
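
    // Abort when the average error exceeds the 0.05 tolerance, or when it is NaN
    // (counter == 0 makes avg_err 0.0/0); otherwise just log the check as passed.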
    if (avg_err > 0.05 || std::isnan(avg_err)) {
        std::cerr << "ERROR: avg_err=" << avg_err << " in " << ggml_op_name(tensor->op) << " (check " << check_counter << ")" << std::endl;
        std::cerr << "tensor=" << tensor << " tensor->name=" << tensor->name << " tensor->type: " << ggml_type_name(tensor->type) << " ne0=" << tensor->ne[0] << " nb0=" << tensor->nb[0] << " ne1=" << tensor->ne[1] << " nb1=" << tensor->nb[1] << " ne2=" << tensor->ne[2] << " nb2=" << tensor->nb[2] << " ne3=" << tensor->ne[3] << " nb3=" << tensor->nb[3] << " offset=" << tensor->view_offs << std::endl;
        if (src0 != nullptr) {
            std::cerr << "src0=" << src0 << " op=" << ggml_op_name(src0->op) << " type=" << ggml_type_name(src0->type) << " ne0=" << src0->ne[0] << " nb0=" << src0->nb[0] << " ne1=" << src0->ne[1] << " nb1=" << src0->nb[1] << " ne2=" << src0->ne[2] << " nb2=" << src0->nb[2] << " ne3=" << src0->ne[3] << " nb3=" << src0->nb[3] << " offset=" << src0->view_offs << std::endl;
        }
        if (src1 != nullptr) {
            std::cerr << "src1=" << src1 << " op=" << ggml_op_name(src1->op) << " type=" << ggml_type_name(src1->type) << " ne0=" << src1->ne[0] << " nb0=" << src1->nb[0] << " ne1=" << src1->ne[1] << " nb1=" << src1->nb[1] << " ne2=" << src1->ne[2] << " nb2=" << src1->nb[2] << " ne3=" << src1->ne[3] << " nb3=" << src1->nb[3] << " offset=" << src1->view_offs << std::endl;
        }
        if (src2 != nullptr) {
            std::cerr << "src2=" << src2 << " op=" << ggml_op_name(src2->op) << " type=" << ggml_type_name(src2->type) << " ne0=" << src2->ne[0] << " nb0=" << src2->nb[0] << " ne1=" << src2->ne[1] << " nb1=" << src2->nb[1] << " ne2=" << src2->ne[2] << " nb2=" << src2->nb[2] << " ne3=" << src2->ne[3] << " nb3=" << src2->nb[3] << " offset=" << src2->view_offs << std::endl;
        }
        std::cerr << "First error: result=" << first_error_result << " correct=" << first_error_correct << " i3=" << first_error[3] << " i2=" << first_error[2] << " i1=" << first_error[1] << " i0=" << first_error[0] << std::endl;
        std::cerr << std::endl << "Result:" << std::endl;
        ggml_vk_print_tensor_area(tensor, tensor_data, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl << "Correct:" << std::endl;
        ggml_vk_print_tensor_area(tensor, comp_result, first_error[0], first_error[1], first_error[2], first_error[3]);
        std::cerr << std::endl;
        std::vector<const ggml_tensor *> done;
        ggml_vk_print_graph_origin(tensor, done);
        GGML_ABORT("fatal error");
    } else {
        std::cerr << check_counter << " " << tensor->name << " op=" << ggml_op_name(tensor->op) << " avg_err=" << avg_err << std::endl;
    }
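
    // Release the CPU reference result that this check compared against.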
    free(comp_result);
    comp_result = nullptr;
    comp_size = 0;
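
    // tensor_data is a heap-allocated readback copy only when the tensor lives in a
    // Vulkan buffer; otherwise it aliases host memory that is not owned here.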
    if (ggml_backend_buffer_is_vk(tensor->buffer)) {
        free(tensor_data);
    }

    VK_LOG_DEBUG("END ggml_vk_check_results_1(" << tensor->name << ")");
}
#endif